In [ ]:
import numpy as np
import pandas as pd
from packaging import version
import time
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error as MSE
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import models, layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, BatchNormalization, Dropout, Flatten, Dense
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.preprocessing import image
from tensorflow.keras.utils import to_categorical
import tensorflow.keras.backend as k
%matplotlib inline
np.set_printoptions(precision=3, suppress=True)
print("This notebook requires TensorFlow 2.0 or above")
print("TensorFlow version: ", tf.__version__)
assert version.parse(tf.__version__).release[0] >=2
print("Keras version: ", keras.__version__)
This notebook requires TensorFlow 2.0 or above TensorFlow version: 2.18.0 Keras version: 3.8.0
In [ ]:
# Loading the cifar10 Dataset
(train_images, train_labels), (test_images, test_labels) = keras.datasets.cifar10.load_data()
Downloading data from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz 170498071/170498071 ━━━━━━━━━━━━━━━━━━━━ 15s 0us/step
In [ ]:
# Exploratory Data Analysis
print(f"train images shape`: {train_images.shape}")
print(f"train labels shape: {train_labels.shape}")
print(f"test images shape: {test_images.shape}")
print(f"test labels shape: {test_labels.shape}")
train images shape`: (50000, 32, 32, 3) train labels shape: (50000, 1) test images shape: (10000, 32, 32, 3) test labels shape: (10000, 1)
In [ ]:
# Explore the labels, labeled as a numerical digit that needs conversion
# to an item description
print(f"First 10 training labels: {train_labels[:10]}")
First 10 training labels: [[6] [9] [9] [4] [1] [1] [2] [7] [8] [3]]
In [ ]:
# Data Analysis Functions
def show_random_examples(x, y, p):
indices = np.random.choice(range(x.shape[0]), 10, replace=False)
x = x[indices]
y = y[indices]
p = p[indices]
plt.figure(figsize=(10, 5))
for i in range(10):
plt.subplot(2, 5, i + 1)
plt.imshow(x[i])
plt.xticks([])
plt.yticks([])
col = 'green' if np.argmax(y[i]) == np.argmax(p[i]) else 'red'
plt.xlabel(class_names_preview[np.argmax(p[i])], color=col)
plt.show()
def get_three_classes(x, y):
def indices_of(class_id):
indices, _ = np.where(y == float(class_id))
return indices
indices = np.concatenate([indices_of(0), indices_of(1), indices_of(2)], axis=0)
x = x[indices]
y = y[indices]
count = x.shape[0]
indices = np.random.choice(range(count), count, replace=False)
x = x[indices]
y = y[indices]
y = tf.keras.utils.to_categorical(y)
return x, y
def plot_history(history):
losses = history.history['loss']
accs = history.history['accuracy']
val_losses = history.history['val_loss']
val_accs = history.history['val_accuracy']
epochs = len(losses)
plt.figure(figsize=(16, 4))
for i, metrics in enumerate(zip([losses, accs], [val_losses, val_accs], ['Loss', 'Accuracy'])):
plt.subplot(1, 2, i + 1)
plt.plot(range(epochs), metrics[0], label='Training {}'.format(metrics[2]))
plt.plot(range(epochs), metrics[1], label='Validation {}'.format(metrics[2]))
plt.legend()
plt.show()
def display_training_curves(training, validation, title, subplot):
ax = plt.subplot(subplot)
ax.plot(training)
ax.plot(validation)
ax.set_title('model '+ title)
ax.set_ylabel(title)
ax.set_xlabel('epoch')
ax.legend(['training', 'validation'])
def print_validation_report(y_test, predictions):
print("Classification Report")
print(classification_report(y_test, predictions))
print('Accuracy Score: {}'.format(accuracy_score(y_test, predictions)))
print('Root Mean Square Error: {}'.format(np.sqrt(MSE(y_test, predictions))))
def plot_confusion_matrix(y_true, y_pred):
mtx = confusion_matrix(y_true, y_pred)
fig, ax = plt.subplots(figsize=(16,12))
sns.heatmap(mtx, annot=True, fmt='d', linewidths=.75, cbar=False, ax=ax,cmap='Blues',linecolor='white')
# square=True,
plt.ylabel('true label')
plt.xlabel('predicted label')
In [ ]:
train_image_preview, train_label_preview = get_three_classes(train_images, train_labels)
test_image_preview, test_label_preview = get_three_classes(test_images, test_labels)
class_names_preview = ['airplane', 'car', 'bird']
show_random_examples(train_image_preview, train_label_preview, train_label_preview)
In [ ]:
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog','frog', 'horse' ,'ship' ,'truck']
In [ ]:
image_train_split, image_val_split, label_train_split, label_val_split = train_test_split(train_images, train_labels, test_size=.1, random_state=42, shuffle=True)
print(image_train_split.shape)
print(image_val_split.shape)
print(label_train_split.shape)
print(label_val_split.shape)
(45000, 32, 32, 3) (5000, 32, 32, 3) (45000, 1) (5000, 1)
In [ ]:
image_train_norm = image_train_split / 255.0
image_val_norm = image_val_split / 255.0
image_test_norm = test_images / 255.0
image_train_norm.shape
Out[ ]:
(45000, 32, 32, 3)
In [ ]:
# Build Model 2-1: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_8'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=8, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 8s 6ms/step - accuracy: 0.3254 - loss: 1.8834 - val_accuracy: 0.4512 - val_loss: 1.5801 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.4740 - loss: 1.4991 - val_accuracy: 0.4938 - val_loss: 1.4709 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.5024 - loss: 1.4143 - val_accuracy: 0.5060 - val_loss: 1.4343 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 3ms/step - accuracy: 0.5247 - loss: 1.3617 - val_accuracy: 0.5222 - val_loss: 1.3893 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 3ms/step - accuracy: 0.5331 - loss: 1.3317 - val_accuracy: 0.5412 - val_loss: 1.3437 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.5447 - loss: 1.3119 - val_accuracy: 0.5492 - val_loss: 1.3248 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.5478 - loss: 1.2959 - val_accuracy: 0.5436 - val_loss: 1.3217 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.5500 - loss: 1.2830 - val_accuracy: 0.5588 - val_loss: 1.2985 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 3ms/step - accuracy: 0.5569 - loss: 1.2697 - val_accuracy: 0.5638 - val_loss: 1.2977 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 3ms/step - accuracy: 0.5542 - loss: 1.2741 - val_accuracy: 0.5518 - val_loss: 1.2975 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.5574 - loss: 1.2619 - val_accuracy: 0.5696 - val_loss: 1.2681 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.5700 - loss: 1.2378 - val_accuracy: 0.5688 - val_loss: 1.2598 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 3ms/step - accuracy: 0.5710 - loss: 1.2365 - val_accuracy: 0.5604 - val_loss: 1.2711 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.5656 - loss: 1.2517 - val_accuracy: 0.5622 - val_loss: 1.2742 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.5736 - loss: 1.2652 313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step Classification Report precision recall f1-score support 0 0.71 0.54 0.61 1000 1 0.69 0.68 0.69 1000 2 0.40 0.45 0.42 1000 3 0.38 0.49 0.43 1000 4 0.48 0.56 0.51 1000 5 0.53 0.35 0.42 1000 6 0.72 0.64 0.68 1000 7 0.58 0.67 0.62 1000 8 0.75 0.63 0.69 1000 9 0.59 0.66 0.63 1000 accuracy 0.57 10000 macro avg 0.58 0.57 0.57 10000 weighted avg 0.58 0.57 0.57 10000 Accuracy Score: 0.5668 Root Mean Square Error: 2.8229594400203486
In [ ]:
# Build initial data for table
data = {}
data['model'] = ['DNN']
data['accuracy'] = [f"{history.history['accuracy'][-1]:.3f}"]
data['val_accuracy'] = [f"{history.history['val_accuracy'][-1]:.3f}"]
data['test_accuracy'] = [f"{test_pred[1]:.3f}"]
data['loss'] = [f"{history.history['loss'][-1]:.3f}"]
data['val_loss'] = [f"{history.history['val_loss'][-1]:.3f}"]
data['test_loss'] = [f"{test_pred[0]:.3f}"]
data['time'] = [f"{time_end - time_start:.3f}"]
In [ ]:
def add_to_data(data, model, history, test_pred):
data['model'].append(model)
data['accuracy'].append(f"{history.history['accuracy'][-1]:.3f}")
data['val_accuracy'].append(f"{history.history['val_accuracy'][-1]:.3f}")
data['test_accuracy'].append(f"{test_pred[1]:.3f}")
data['loss'].append(f"{history.history['loss'][-1]:.3f}")
data['val_loss'].append(f"{history.history['val_loss'][-1]:.3f}")
data['test_loss'].append(f"{test_pred[0]:.3f}")
data['time'].append(f"{time_end - time_start:.3f}")
In [ ]:
# Build Model 2-1: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_16'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=16, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 11ms/step - accuracy: 0.3473 - loss: 1.8265 - val_accuracy: 0.5032 - val_loss: 1.4674 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 3ms/step - accuracy: 0.5132 - loss: 1.4004 - val_accuracy: 0.5448 - val_loss: 1.3570 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.5513 - loss: 1.2912 - val_accuracy: 0.5572 - val_loss: 1.2955 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.5714 - loss: 1.2333 - val_accuracy: 0.5794 - val_loss: 1.2424 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.5843 - loss: 1.2055 - val_accuracy: 0.5846 - val_loss: 1.2308 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.5938 - loss: 1.1756 - val_accuracy: 0.5920 - val_loss: 1.1989 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.6023 - loss: 1.1435 - val_accuracy: 0.5878 - val_loss: 1.2063 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6058 - loss: 1.1326 - val_accuracy: 0.5900 - val_loss: 1.1924 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6106 - loss: 1.1261 - val_accuracy: 0.6054 - val_loss: 1.1510 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.6189 - loss: 1.1019 - val_accuracy: 0.6076 - val_loss: 1.1627 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 3ms/step - accuracy: 0.6207 - loss: 1.0961 - val_accuracy: 0.6114 - val_loss: 1.1377 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6268 - loss: 1.0697 - val_accuracy: 0.6192 - val_loss: 1.1221 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.6307 - loss: 1.0698 - val_accuracy: 0.6228 - val_loss: 1.1136 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.6310 - loss: 1.0649 - val_accuracy: 0.6112 - val_loss: 1.1232 Epoch 15/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 3ms/step - accuracy: 0.6399 - loss: 1.0379 - val_accuracy: 0.6144 - val_loss: 1.1165 Epoch 16/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6345 - loss: 1.0475 - val_accuracy: 0.6100 - val_loss: 1.1104 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.6214 - loss: 1.0937 313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step Classification Report precision recall f1-score support 0 0.59 0.74 0.66 1000 1 0.71 0.78 0.74 1000 2 0.50 0.41 0.45 1000 3 0.48 0.39 0.43 1000 4 0.68 0.37 0.48 1000 5 0.48 0.64 0.54 1000 6 0.58 0.83 0.69 1000 7 0.71 0.68 0.69 1000 8 0.79 0.69 0.74 1000 9 0.76 0.66 0.71 1000 accuracy 0.62 10000 macro avg 0.63 0.62 0.61 10000 weighted avg 0.63 0.62 0.61 10000 Accuracy Score: 0.6189 Root Mean Square Error: 2.6270896444544865
In [ ]:
# Build Model 2-3: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_32'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 8ms/step - accuracy: 0.3781 - loss: 1.7647 - val_accuracy: 0.5180 - val_loss: 1.3971 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.5422 - loss: 1.3151 - val_accuracy: 0.5574 - val_loss: 1.3012 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 8ms/step - accuracy: 0.5786 - loss: 1.2217 - val_accuracy: 0.5876 - val_loss: 1.2057 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 7s 4ms/step - accuracy: 0.6015 - loss: 1.1559 - val_accuracy: 0.5978 - val_loss: 1.1903 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.6152 - loss: 1.1115 - val_accuracy: 0.6118 - val_loss: 1.1505 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6343 - loss: 1.0623 - val_accuracy: 0.6238 - val_loss: 1.1262 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - accuracy: 0.6371 - loss: 1.0452 - val_accuracy: 0.6220 - val_loss: 1.1115 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.6466 - loss: 1.0200 - val_accuracy: 0.6318 - val_loss: 1.0780 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6568 - loss: 1.0004 - val_accuracy: 0.6344 - val_loss: 1.0656 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 5ms/step - accuracy: 0.6604 - loss: 0.9811 - val_accuracy: 0.6362 - val_loss: 1.0755 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6612 - loss: 0.9781 - val_accuracy: 0.6306 - val_loss: 1.0636 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6705 - loss: 0.9535 - val_accuracy: 0.6400 - val_loss: 1.0451 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6735 - loss: 0.9423 - val_accuracy: 0.6466 - val_loss: 1.0447 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6776 - loss: 0.9265 - val_accuracy: 0.6388 - val_loss: 1.0443 Epoch 15/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.6758 - loss: 0.9286 - val_accuracy: 0.6564 - val_loss: 1.0149 Epoch 16/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6836 - loss: 0.9163 - val_accuracy: 0.6448 - val_loss: 1.0413 Epoch 17/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.6837 - loss: 0.9107 - val_accuracy: 0.6474 - val_loss: 1.0390 Epoch 18/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.6920 - loss: 0.9065 - val_accuracy: 0.6496 - val_loss: 1.0142 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.6436 - loss: 1.0291 313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step Classification Report precision recall f1-score support 0 0.62 0.73 0.67 1000 1 0.76 0.75 0.75 1000 2 0.48 0.55 0.51 1000 3 0.45 0.54 0.49 1000 4 0.55 0.60 0.57 1000 5 0.65 0.35 0.46 1000 6 0.74 0.73 0.74 1000 7 0.78 0.66 0.72 1000 8 0.74 0.77 0.75 1000 9 0.74 0.72 0.73 1000 accuracy 0.64 10000 macro avg 0.65 0.64 0.64 10000 weighted avg 0.65 0.64 0.64 10000 Accuracy Score: 0.6401 Root Mean Square Error: 2.5772659932571957
In [ ]:
# Build Model 2-4: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_64'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 9ms/step - accuracy: 0.3945 - loss: 1.7022 - val_accuracy: 0.5544 - val_loss: 1.3229 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.5636 - loss: 1.2511 - val_accuracy: 0.5914 - val_loss: 1.1967 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.6047 - loss: 1.1435 - val_accuracy: 0.5960 - val_loss: 1.1648 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.6292 - loss: 1.0650 - val_accuracy: 0.6190 - val_loss: 1.1017 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.6523 - loss: 1.0133 - val_accuracy: 0.6206 - val_loss: 1.1015 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6704 - loss: 0.9642 - val_accuracy: 0.6392 - val_loss: 1.0671 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6786 - loss: 0.9340 - val_accuracy: 0.6450 - val_loss: 1.0479 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6826 - loss: 0.9170 - val_accuracy: 0.6448 - val_loss: 1.0263 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.6935 - loss: 0.8880 - val_accuracy: 0.6500 - val_loss: 1.0198 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.7052 - loss: 0.8639 - val_accuracy: 0.6440 - val_loss: 1.0176 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.7041 - loss: 0.8513 - val_accuracy: 0.6448 - val_loss: 1.0176 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.7054 - loss: 0.8507 - val_accuracy: 0.6600 - val_loss: 0.9787 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.7116 - loss: 0.8273 - val_accuracy: 0.6502 - val_loss: 1.0071 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.7166 - loss: 0.8203 - val_accuracy: 0.6522 - val_loss: 1.0018 Epoch 15/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.7141 - loss: 0.8151 - val_accuracy: 0.6644 - val_loss: 0.9656 Epoch 16/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.7236 - loss: 0.8032 - val_accuracy: 0.6524 - val_loss: 1.0016 Epoch 17/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 5ms/step - accuracy: 0.7273 - loss: 0.7774 - val_accuracy: 0.6566 - val_loss: 0.9816 Epoch 18/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 4ms/step - accuracy: 0.7310 - loss: 0.7799 - val_accuracy: 0.6622 - val_loss: 0.9679 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.6562 - loss: 0.9825 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.67 0.65 0.66 1000 1 0.83 0.70 0.76 1000 2 0.49 0.54 0.51 1000 3 0.55 0.40 0.46 1000 4 0.61 0.61 0.61 1000 5 0.58 0.53 0.56 1000 6 0.71 0.80 0.75 1000 7 0.73 0.73 0.73 1000 8 0.66 0.83 0.74 1000 9 0.72 0.77 0.74 1000 accuracy 0.66 10000 macro avg 0.66 0.66 0.65 10000 weighted avg 0.66 0.66 0.65 10000 Accuracy Score: 0.6559 Root Mean Square Error: 2.5302964253225353
In [ ]:
# Build Model 2-5: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_128'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 12ms/step - accuracy: 0.4039 - loss: 1.6900 - val_accuracy: 0.5378 - val_loss: 1.3547 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.5700 - loss: 1.2423 - val_accuracy: 0.5920 - val_loss: 1.2093 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6135 - loss: 1.1165 - val_accuracy: 0.6038 - val_loss: 1.1598 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6361 - loss: 1.0426 - val_accuracy: 0.6224 - val_loss: 1.1094 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6596 - loss: 0.9776 - val_accuracy: 0.6268 - val_loss: 1.0803 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6824 - loss: 0.9275 - val_accuracy: 0.6436 - val_loss: 1.0367 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6992 - loss: 0.8805 - val_accuracy: 0.6620 - val_loss: 1.0060 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7084 - loss: 0.8425 - val_accuracy: 0.6368 - val_loss: 1.0383 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7180 - loss: 0.8166 - val_accuracy: 0.6378 - val_loss: 1.0222 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7251 - loss: 0.7920 - val_accuracy: 0.6422 - val_loss: 1.0190 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.6511 - loss: 1.0319 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.75 0.60 0.67 1000 1 0.71 0.83 0.77 1000 2 0.59 0.37 0.46 1000 3 0.48 0.46 0.47 1000 4 0.62 0.58 0.60 1000 5 0.53 0.61 0.57 1000 6 0.63 0.84 0.72 1000 7 0.64 0.79 0.71 1000 8 0.84 0.68 0.75 1000 9 0.74 0.74 0.74 1000 accuracy 0.65 10000 macro avg 0.65 0.65 0.64 10000 weighted avg 0.65 0.65 0.64 10000 Accuracy Score: 0.6498 Root Mean Square Error: 2.5028983199482955
In [ ]:
# Build Model 2-5: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_256'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 13s 14ms/step - accuracy: 0.4030 - loss: 1.6863 - val_accuracy: 0.5580 - val_loss: 1.2902 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 15s 9ms/step - accuracy: 0.5761 - loss: 1.2200 - val_accuracy: 0.5878 - val_loss: 1.2253 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 9ms/step - accuracy: 0.6100 - loss: 1.1202 - val_accuracy: 0.5844 - val_loss: 1.2006 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 9ms/step - accuracy: 0.6465 - loss: 1.0253 - val_accuracy: 0.6212 - val_loss: 1.1092 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 9ms/step - accuracy: 0.6759 - loss: 0.9436 - val_accuracy: 0.6446 - val_loss: 1.0457 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 9ms/step - accuracy: 0.6926 - loss: 0.8823 - val_accuracy: 0.6410 - val_loss: 1.0553 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 9ms/step - accuracy: 0.7055 - loss: 0.8451 - val_accuracy: 0.6492 - val_loss: 1.0120 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 9ms/step - accuracy: 0.7247 - loss: 0.8000 - val_accuracy: 0.6542 - val_loss: 1.0018 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 7s 9ms/step - accuracy: 0.7295 - loss: 0.7779 - val_accuracy: 0.6352 - val_loss: 1.0549 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 9ms/step - accuracy: 0.7370 - loss: 0.7524 - val_accuracy: 0.6526 - val_loss: 1.0065 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 9ms/step - accuracy: 0.7547 - loss: 0.7054 - val_accuracy: 0.6460 - val_loss: 1.0338 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.6345 - loss: 1.0640 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.71 0.60 0.65 1000 1 0.80 0.74 0.77 1000 2 0.43 0.59 0.50 1000 3 0.47 0.43 0.45 1000 4 0.61 0.50 0.55 1000 5 0.56 0.51 0.53 1000 6 0.61 0.82 0.70 1000 7 0.79 0.66 0.71 1000 8 0.82 0.70 0.75 1000 9 0.67 0.78 0.72 1000 accuracy 0.63 10000 macro avg 0.65 0.63 0.63 10000 weighted avg 0.65 0.63 0.63 10000 Accuracy Score: 0.6334 Root Mean Square Error: 2.5644297611749867
In [ ]:
# Build Model 2-7: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_512'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 19s 22ms/step - accuracy: 0.4036 - loss: 1.7000 - val_accuracy: 0.5482 - val_loss: 1.3003 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 16ms/step - accuracy: 0.5810 - loss: 1.2165 - val_accuracy: 0.5818 - val_loss: 1.2270 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 12s 16ms/step - accuracy: 0.6226 - loss: 1.0928 - val_accuracy: 0.5672 - val_loss: 1.2578 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 20s 16ms/step - accuracy: 0.6486 - loss: 1.0229 - val_accuracy: 0.6124 - val_loss: 1.1316 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 16ms/step - accuracy: 0.6692 - loss: 0.9536 - val_accuracy: 0.6254 - val_loss: 1.0709 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 20s 16ms/step - accuracy: 0.6952 - loss: 0.8885 - val_accuracy: 0.6378 - val_loss: 1.0625 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 16ms/step - accuracy: 0.7101 - loss: 0.8376 - val_accuracy: 0.6408 - val_loss: 1.0425 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 16ms/step - accuracy: 0.7264 - loss: 0.7863 - val_accuracy: 0.6390 - val_loss: 1.0509 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 21s 16ms/step - accuracy: 0.7408 - loss: 0.7500 - val_accuracy: 0.6584 - val_loss: 1.0029 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 20s 16ms/step - accuracy: 0.7531 - loss: 0.7191 - val_accuracy: 0.6370 - val_loss: 1.0672 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 21s 16ms/step - accuracy: 0.7594 - loss: 0.6907 - val_accuracy: 0.6572 - val_loss: 1.0042 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 16ms/step - accuracy: 0.7723 - loss: 0.6594 - val_accuracy: 0.6430 - val_loss: 1.0652 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.6335 - loss: 1.0966 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.66 0.65 0.66 1000 1 0.69 0.83 0.76 1000 2 0.51 0.48 0.50 1000 3 0.46 0.52 0.49 1000 4 0.65 0.49 0.56 1000 5 0.62 0.41 0.49 1000 6 0.65 0.80 0.72 1000 7 0.73 0.68 0.70 1000 8 0.78 0.69 0.73 1000 9 0.63 0.80 0.70 1000 accuracy 0.64 10000 macro avg 0.64 0.64 0.63 10000 weighted avg 0.64 0.64 0.63 10000 Accuracy Score: 0.6353 Root Mean Square Error: 2.63897707454991
In [ ]:
data_df = pd.DataFrame(data)
data_df
Out[ ]:
| model | accuracy | val_accuracy | test_accuracy | loss | val_loss | test_loss | time | |
|---|---|---|---|---|---|---|---|---|
| 0 | DNN | 0.565 | 0.562 | 0.567 | 1.245 | 1.274 | 1.268 | 46.604 |
| 1 | CNN_DO_MP_DO_16 | 0.634 | 0.610 | 0.619 | 1.056 | 1.110 | 1.108 | 60.615 |
| 2 | CNN_DO_MP_DO_32 | 0.684 | 0.650 | 0.640 | 0.918 | 1.014 | 1.041 | 85.284 |
| 3 | CNN_DO_MP_DO_64 | 0.726 | 0.662 | 0.656 | 0.792 | 0.968 | 1.000 | 82.357 |
| 4 | CNN_DO_MP_DO_128 | 0.722 | 0.642 | 0.650 | 0.803 | 1.019 | 1.041 | 55.198 |
| 5 | CNN_DO_MP_DO_256 | 0.746 | 0.646 | 0.633 | 0.728 | 1.034 | 1.080 | 106.222 |
| 6 | CNN_DO_MP_DO_512 | 0.764 | 0.643 | 0.635 | 0.677 | 1.065 | 1.105 | 200.123 |
In [ ]:
# Build Model 2-8: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_32-2.2'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=32, kernel_size=(2, 2), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 8ms/step - accuracy: 0.3688 - loss: 1.7767 - val_accuracy: 0.5204 - val_loss: 1.4312 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.5388 - loss: 1.3298 - val_accuracy: 0.5614 - val_loss: 1.3113 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.5710 - loss: 1.2361 - val_accuracy: 0.5774 - val_loss: 1.2541 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 4ms/step - accuracy: 0.5897 - loss: 1.1860 - val_accuracy: 0.5852 - val_loss: 1.2188 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.6034 - loss: 1.1488 - val_accuracy: 0.6058 - val_loss: 1.1890 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6103 - loss: 1.1251 - val_accuracy: 0.6104 - val_loss: 1.1726 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 8s 4ms/step - accuracy: 0.6243 - loss: 1.0918 - val_accuracy: 0.6138 - val_loss: 1.1570 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6315 - loss: 1.0696 - val_accuracy: 0.6188 - val_loss: 1.1385 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.6329 - loss: 1.0644 - val_accuracy: 0.6208 - val_loss: 1.1251 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.6348 - loss: 1.0450 - val_accuracy: 0.6176 - val_loss: 1.1284 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6370 - loss: 1.0509 - val_accuracy: 0.6264 - val_loss: 1.1080 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.6429 - loss: 1.0230 - val_accuracy: 0.6268 - val_loss: 1.1013 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.6532 - loss: 1.0128 - val_accuracy: 0.6220 - val_loss: 1.1030 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 7s 8ms/step - accuracy: 0.6582 - loss: 0.9991 - val_accuracy: 0.6292 - val_loss: 1.0958 Epoch 15/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 8s 5ms/step - accuracy: 0.6594 - loss: 0.9873 - val_accuracy: 0.6248 - val_loss: 1.0996 Epoch 16/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.6573 - loss: 0.9911 - val_accuracy: 0.6328 - val_loss: 1.0842 Epoch 17/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6646 - loss: 0.9759 - val_accuracy: 0.6318 - val_loss: 1.0728 Epoch 18/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.6644 - loss: 0.9746 - val_accuracy: 0.6316 - val_loss: 1.0777 Epoch 19/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.6615 - loss: 0.9768 - val_accuracy: 0.6316 - val_loss: 1.0770 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.6321 - loss: 1.0671 313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step Classification Report precision recall f1-score support 0 0.72 0.62 0.67 1000 1 0.74 0.76 0.75 1000 2 0.51 0.44 0.47 1000 3 0.48 0.42 0.45 1000 4 0.58 0.50 0.53 1000 5 0.62 0.44 0.51 1000 6 0.58 0.85 0.69 1000 7 0.72 0.70 0.71 1000 8 0.64 0.81 0.72 1000 9 0.67 0.73 0.70 1000 accuracy 0.63 10000 macro avg 0.63 0.63 0.62 10000 weighted avg 0.63 0.63 0.62 10000 Accuracy Score: 0.6277 Root Mean Square Error: 2.6621983397185116
In [ ]:
# Build Model 2-9: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_32-4.4'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=32, kernel_size=(4, 4), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 8s 7ms/step - accuracy: 0.3678 - loss: 1.7726 - val_accuracy: 0.5088 - val_loss: 1.4260 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - accuracy: 0.5316 - loss: 1.3425 - val_accuracy: 0.5562 - val_loss: 1.2994 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.5678 - loss: 1.2316 - val_accuracy: 0.5690 - val_loss: 1.2554 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.5960 - loss: 1.1560 - val_accuracy: 0.5966 - val_loss: 1.1786 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6147 - loss: 1.1066 - val_accuracy: 0.6138 - val_loss: 1.1346 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.6279 - loss: 1.0779 - val_accuracy: 0.6134 - val_loss: 1.1088 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6361 - loss: 1.0476 - val_accuracy: 0.6176 - val_loss: 1.1176 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6427 - loss: 1.0305 - val_accuracy: 0.6220 - val_loss: 1.1074 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6480 - loss: 1.0097 - val_accuracy: 0.6358 - val_loss: 1.0586 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.6611 - loss: 0.9865 - val_accuracy: 0.6260 - val_loss: 1.0675 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 4ms/step - accuracy: 0.6597 - loss: 0.9787 - val_accuracy: 0.6232 - val_loss: 1.1084 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6670 - loss: 0.9689 - val_accuracy: 0.6370 - val_loss: 1.0510 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 5ms/step - accuracy: 0.6716 - loss: 0.9546 - val_accuracy: 0.6404 - val_loss: 1.0396 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 4ms/step - accuracy: 0.6669 - loss: 0.9576 - val_accuracy: 0.6356 - val_loss: 1.0357 Epoch 15/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6746 - loss: 0.9338 - val_accuracy: 0.6510 - val_loss: 1.0267 Epoch 16/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 5ms/step - accuracy: 0.6775 - loss: 0.9375 - val_accuracy: 0.6506 - val_loss: 1.0138 Epoch 17/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6800 - loss: 0.9308 - val_accuracy: 0.6436 - val_loss: 1.0460 Epoch 18/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.6794 - loss: 0.9294 - val_accuracy: 0.6362 - val_loss: 1.0538 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.6458 - loss: 1.0414 313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step Classification Report precision recall f1-score support 0 0.73 0.61 0.67 1000 1 0.75 0.78 0.76 1000 2 0.60 0.37 0.46 1000 3 0.55 0.32 0.40 1000 4 0.48 0.70 0.57 1000 5 0.52 0.62 0.57 1000 6 0.63 0.83 0.72 1000 7 0.66 0.73 0.69 1000 8 0.81 0.73 0.77 1000 9 0.74 0.73 0.73 1000 accuracy 0.64 10000 macro avg 0.65 0.64 0.63 10000 weighted avg 0.65 0.64 0.63 10000 Accuracy Score: 0.6409 Root Mean Square Error: 2.4692306494128897
In [ ]:
# Build Model 2-10: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_32-5.5'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=32, kernel_size=(5, 5), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 9ms/step - accuracy: 0.3618 - loss: 1.7763 - val_accuracy: 0.5220 - val_loss: 1.3938 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.5309 - loss: 1.3413 - val_accuracy: 0.5344 - val_loss: 1.3127 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.5647 - loss: 1.2488 - val_accuracy: 0.5850 - val_loss: 1.2264 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.5970 - loss: 1.1745 - val_accuracy: 0.5922 - val_loss: 1.1701 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6136 - loss: 1.1153 - val_accuracy: 0.5654 - val_loss: 1.2258 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6234 - loss: 1.0813 - val_accuracy: 0.6142 - val_loss: 1.1338 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 5ms/step - accuracy: 0.6355 - loss: 1.0605 - val_accuracy: 0.6282 - val_loss: 1.0941 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 4ms/step - accuracy: 0.6475 - loss: 1.0169 - val_accuracy: 0.6266 - val_loss: 1.0800 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.6534 - loss: 1.0053 - val_accuracy: 0.6324 - val_loss: 1.0710 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.6562 - loss: 1.0038 - val_accuracy: 0.6172 - val_loss: 1.0899 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.6604 - loss: 0.9815 - val_accuracy: 0.6350 - val_loss: 1.0596 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 5ms/step - accuracy: 0.6694 - loss: 0.9637 - val_accuracy: 0.6418 - val_loss: 1.0313 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6709 - loss: 0.9509 - val_accuracy: 0.6360 - val_loss: 1.0612 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6742 - loss: 0.9415 - val_accuracy: 0.6392 - val_loss: 1.0437 Epoch 15/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6789 - loss: 0.9261 - val_accuracy: 0.6386 - val_loss: 1.0426 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.6446 - loss: 1.0463 313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step Classification Report precision recall f1-score support 0 0.62 0.70 0.66 1000 1 0.75 0.79 0.77 1000 2 0.64 0.36 0.46 1000 3 0.47 0.46 0.46 1000 4 0.52 0.66 0.58 1000 5 0.62 0.46 0.53 1000 6 0.59 0.85 0.70 1000 7 0.79 0.68 0.73 1000 8 0.74 0.75 0.75 1000 9 0.75 0.70 0.72 1000 accuracy 0.64 10000 macro avg 0.65 0.64 0.64 10000 weighted avg 0.65 0.64 0.64 10000 Accuracy Score: 0.6411 Root Mean Square Error: 2.5542709331627296
In [ ]:
# Build Model 2-11: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_64-2.2'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=64, kernel_size=(2, 2), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 13s 8ms/step - accuracy: 0.3773 - loss: 1.7606 - val_accuracy: 0.5342 - val_loss: 1.3675 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 8ms/step - accuracy: 0.5525 - loss: 1.2938 - val_accuracy: 0.5704 - val_loss: 1.2658 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.5880 - loss: 1.1903 - val_accuracy: 0.5796 - val_loss: 1.2386 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6140 - loss: 1.1255 - val_accuracy: 0.5898 - val_loss: 1.1889 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 8ms/step - accuracy: 0.6235 - loss: 1.0961 - val_accuracy: 0.6090 - val_loss: 1.1588 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 8ms/step - accuracy: 0.6395 - loss: 1.0452 - val_accuracy: 0.6146 - val_loss: 1.1270 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 7ms/step - accuracy: 0.6409 - loss: 1.0356 - val_accuracy: 0.6122 - val_loss: 1.1421 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6522 - loss: 1.0070 - val_accuracy: 0.5524 - val_loss: 1.2644 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6586 - loss: 0.9967 - val_accuracy: 0.6146 - val_loss: 1.1192 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.6222 - loss: 1.1082 313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step Classification Report precision recall f1-score support 0 0.72 0.61 0.66 1000 1 0.79 0.66 0.72 1000 2 0.56 0.35 0.43 1000 3 0.48 0.37 0.41 1000 4 0.54 0.54 0.54 1000 5 0.47 0.66 0.55 1000 6 0.66 0.73 0.69 1000 7 0.64 0.73 0.68 1000 8 0.73 0.76 0.74 1000 9 0.62 0.78 0.69 1000 accuracy 0.62 10000 macro avg 0.62 0.62 0.61 10000 weighted avg 0.62 0.62 0.61 10000 Accuracy Score: 0.6176 Root Mean Square Error: 2.6573483023495434
In [ ]:
# Build Model 2-12: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_64-4.4'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=64, kernel_size=(4, 4), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 9ms/step - accuracy: 0.3928 - loss: 1.7050 - val_accuracy: 0.5148 - val_loss: 1.3799 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.5510 - loss: 1.2857 - val_accuracy: 0.5578 - val_loss: 1.2481 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.5981 - loss: 1.1673 - val_accuracy: 0.5666 - val_loss: 1.2722 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6210 - loss: 1.0951 - val_accuracy: 0.6034 - val_loss: 1.1365 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 5ms/step - accuracy: 0.6382 - loss: 1.0484 - val_accuracy: 0.6122 - val_loss: 1.1287 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6522 - loss: 0.9985 - val_accuracy: 0.6214 - val_loss: 1.0773 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.6651 - loss: 0.9734 - val_accuracy: 0.6258 - val_loss: 1.0739 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 5ms/step - accuracy: 0.6744 - loss: 0.9484 - val_accuracy: 0.6414 - val_loss: 1.0238 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 4ms/step - accuracy: 0.6840 - loss: 0.9135 - val_accuracy: 0.6386 - val_loss: 1.0418 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.6905 - loss: 0.8962 - val_accuracy: 0.6252 - val_loss: 1.0777 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.6953 - loss: 0.8789 - val_accuracy: 0.6208 - val_loss: 1.0809 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.6169 - loss: 1.0932 313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step Classification Report precision recall f1-score support 0 0.66 0.65 0.65 1000 1 0.82 0.69 0.75 1000 2 0.40 0.64 0.49 1000 3 0.43 0.49 0.45 1000 4 0.65 0.39 0.49 1000 5 0.45 0.66 0.54 1000 6 0.77 0.69 0.73 1000 7 0.79 0.60 0.68 1000 8 0.77 0.73 0.75 1000 9 0.82 0.61 0.70 1000 accuracy 0.61 10000 macro avg 0.66 0.61 0.62 10000 weighted avg 0.66 0.61 0.62 10000 Accuracy Score: 0.6146 Root Mean Square Error: 2.53327850817868
In [ ]:
# Build Model 2-13: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_64-5.5'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=64, kernel_size=(5, 5), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 8ms/step - accuracy: 0.3703 - loss: 1.7598 - val_accuracy: 0.5092 - val_loss: 1.4101 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.5373 - loss: 1.3190 - val_accuracy: 0.5582 - val_loss: 1.2927 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 4ms/step - accuracy: 0.5796 - loss: 1.2112 - val_accuracy: 0.5734 - val_loss: 1.2255 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.6059 - loss: 1.1305 - val_accuracy: 0.5820 - val_loss: 1.1928 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6213 - loss: 1.0856 - val_accuracy: 0.6218 - val_loss: 1.1085 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.6428 - loss: 1.0249 - val_accuracy: 0.6302 - val_loss: 1.0709 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6608 - loss: 0.9802 - val_accuracy: 0.6378 - val_loss: 1.0637 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6614 - loss: 0.9704 - val_accuracy: 0.6170 - val_loss: 1.1175 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.6661 - loss: 0.9571 - val_accuracy: 0.6322 - val_loss: 1.0562 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6816 - loss: 0.9235 - val_accuracy: 0.6334 - val_loss: 1.0501 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.6353 - loss: 1.0491 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.66 0.68 0.67 1000 1 0.81 0.72 0.76 1000 2 0.52 0.46 0.49 1000 3 0.47 0.44 0.46 1000 4 0.45 0.72 0.55 1000 5 0.65 0.41 0.51 1000 6 0.77 0.69 0.73 1000 7 0.68 0.73 0.71 1000 8 0.81 0.70 0.75 1000 9 0.66 0.80 0.72 1000 accuracy 0.64 10000 macro avg 0.65 0.64 0.63 10000 weighted avg 0.65 0.64 0.63 10000 Accuracy Score: 0.6352 Root Mean Square Error: 2.5438946519067964
In [ ]:
# Build Model 2-14: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_128-2.2'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(2, 2), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 11ms/step - accuracy: 0.3963 - loss: 1.7154 - val_accuracy: 0.5562 - val_loss: 1.3285 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.5651 - loss: 1.2422 - val_accuracy: 0.5564 - val_loss: 1.2822 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6018 - loss: 1.1450 - val_accuracy: 0.5870 - val_loss: 1.2152 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6237 - loss: 1.0816 - val_accuracy: 0.6016 - val_loss: 1.1655 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6460 - loss: 1.0313 - val_accuracy: 0.5770 - val_loss: 1.1958 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6581 - loss: 0.9908 - val_accuracy: 0.5966 - val_loss: 1.1499 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6665 - loss: 0.9667 - val_accuracy: 0.6338 - val_loss: 1.0903 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6736 - loss: 0.9447 - val_accuracy: 0.6330 - val_loss: 1.0787 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6853 - loss: 0.9159 - val_accuracy: 0.6372 - val_loss: 1.0710 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6889 - loss: 0.8906 - val_accuracy: 0.6366 - val_loss: 1.0595 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7028 - loss: 0.8672 - val_accuracy: 0.6364 - val_loss: 1.0660 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7037 - loss: 0.8442 - val_accuracy: 0.6482 - val_loss: 1.0317 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7059 - loss: 0.8413 - val_accuracy: 0.6338 - val_loss: 1.0654 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.7213 - loss: 0.8065 - val_accuracy: 0.6466 - val_loss: 1.0276 Epoch 15/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7179 - loss: 0.8041 - val_accuracy: 0.6266 - val_loss: 1.0596 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.6287 - loss: 1.0601 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.58 0.76 0.66 1000 1 0.71 0.82 0.76 1000 2 0.45 0.54 0.49 1000 3 0.48 0.40 0.44 1000 4 0.54 0.58 0.56 1000 5 0.59 0.46 0.52 1000 6 0.62 0.83 0.71 1000 7 0.82 0.61 0.70 1000 8 0.83 0.64 0.72 1000 9 0.80 0.63 0.70 1000 accuracy 0.63 10000 macro avg 0.64 0.63 0.63 10000 weighted avg 0.64 0.63 0.63 10000 Accuracy Score: 0.6273 Root Mean Square Error: 2.6697565432076384
In [ ]:
# Build Model 2-15: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_128-4.4'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(4,4), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 10ms/step - accuracy: 0.3858 - loss: 1.7222 - val_accuracy: 0.5450 - val_loss: 1.3301 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.5552 - loss: 1.2711 - val_accuracy: 0.5652 - val_loss: 1.2445 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6016 - loss: 1.1490 - val_accuracy: 0.5952 - val_loss: 1.1736 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6257 - loss: 1.0781 - val_accuracy: 0.6084 - val_loss: 1.1465 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6494 - loss: 1.0199 - val_accuracy: 0.6200 - val_loss: 1.1034 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6643 - loss: 0.9652 - val_accuracy: 0.6110 - val_loss: 1.1248 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6765 - loss: 0.9348 - val_accuracy: 0.6354 - val_loss: 1.0642 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6872 - loss: 0.8975 - val_accuracy: 0.6454 - val_loss: 1.0333 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7016 - loss: 0.8639 - val_accuracy: 0.6402 - val_loss: 1.0399 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7130 - loss: 0.8258 - val_accuracy: 0.6472 - val_loss: 1.0304 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7154 - loss: 0.8212 - val_accuracy: 0.6432 - val_loss: 1.0237 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7256 - loss: 0.7910 - val_accuracy: 0.6426 - val_loss: 1.0358 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7315 - loss: 0.7749 - val_accuracy: 0.6586 - val_loss: 1.0101 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7414 - loss: 0.7407 - val_accuracy: 0.6416 - val_loss: 1.0653 Epoch 15/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7503 - loss: 0.7314 - val_accuracy: 0.6356 - val_loss: 1.0841 Epoch 16/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7524 - loss: 0.7018 - val_accuracy: 0.6374 - val_loss: 1.0786 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.6349 - loss: 1.1047 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.57 0.77 0.65 1000 1 0.71 0.80 0.76 1000 2 0.54 0.46 0.49 1000 3 0.47 0.43 0.45 1000 4 0.64 0.51 0.56 1000 5 0.57 0.53 0.55 1000 6 0.62 0.82 0.70 1000 7 0.81 0.60 0.69 1000 8 0.74 0.76 0.75 1000 9 0.73 0.69 0.71 1000 accuracy 0.64 10000 macro avg 0.64 0.64 0.63 10000 weighted avg 0.64 0.64 0.63 10000 Accuracy Score: 0.6359 Root Mean Square Error: 2.633571719167716
In [ ]:
# Build Model 2-16: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_128-5.5'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(5,5), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 11ms/step - accuracy: 0.3837 - loss: 1.7397 - val_accuracy: 0.5346 - val_loss: 1.3491 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.5489 - loss: 1.3056 - val_accuracy: 0.5648 - val_loss: 1.2526 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.5898 - loss: 1.1832 - val_accuracy: 0.5834 - val_loss: 1.1942 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6133 - loss: 1.1088 - val_accuracy: 0.5960 - val_loss: 1.1702 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6361 - loss: 1.0453 - val_accuracy: 0.6060 - val_loss: 1.1208 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6578 - loss: 0.9870 - val_accuracy: 0.6150 - val_loss: 1.1127 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.6689 - loss: 0.9546 - val_accuracy: 0.6226 - val_loss: 1.0826 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6803 - loss: 0.9282 - val_accuracy: 0.6360 - val_loss: 1.0738 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6852 - loss: 0.9064 - val_accuracy: 0.6158 - val_loss: 1.0912 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.6972 - loss: 0.8745 - val_accuracy: 0.6094 - val_loss: 1.1263 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7036 - loss: 0.8574 - val_accuracy: 0.6470 - val_loss: 1.0366 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7097 - loss: 0.8321 - val_accuracy: 0.6350 - val_loss: 1.0626 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.7195 - loss: 0.8036 - val_accuracy: 0.6432 - val_loss: 1.0597 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7246 - loss: 0.7944 - val_accuracy: 0.6316 - val_loss: 1.0683 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.6437 - loss: 1.0588 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.62 0.73 0.67 1000 1 0.80 0.75 0.78 1000 2 0.58 0.42 0.49 1000 3 0.50 0.44 0.47 1000 4 0.51 0.64 0.57 1000 5 0.58 0.51 0.54 1000 6 0.59 0.85 0.70 1000 7 0.72 0.69 0.70 1000 8 0.84 0.69 0.76 1000 9 0.76 0.71 0.73 1000 accuracy 0.64 10000 macro avg 0.65 0.64 0.64 10000 weighted avg 0.65 0.64 0.64 10000 Accuracy Score: 0.6424 Root Mean Square Error: 2.5385625854014315
In [ ]:
# Build Model 2-17: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_CNN_DO_MP_DO16-2.2'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=16, kernel_size=(3,3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=16, kernel_size=(3,3), strides=(1, 1), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 12s 9ms/step - accuracy: 0.2619 - loss: 2.0019 - val_accuracy: 0.4628 - val_loss: 1.6368 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.4498 - loss: 1.5295 - val_accuracy: 0.5096 - val_loss: 1.4945 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.4899 - loss: 1.4331 - val_accuracy: 0.5270 - val_loss: 1.4198 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.5216 - loss: 1.3540 - val_accuracy: 0.5526 - val_loss: 1.3674 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.5293 - loss: 1.3230 - val_accuracy: 0.5666 - val_loss: 1.3325 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.5464 - loss: 1.2886 - val_accuracy: 0.5856 - val_loss: 1.2964 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.5544 - loss: 1.2610 - val_accuracy: 0.5894 - val_loss: 1.2844 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.5592 - loss: 1.2432 - val_accuracy: 0.5710 - val_loss: 1.2728 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.5660 - loss: 1.2300 - val_accuracy: 0.6018 - val_loss: 1.2442 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 5ms/step - accuracy: 0.5669 - loss: 1.2162 - val_accuracy: 0.6044 - val_loss: 1.2158 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.5721 - loss: 1.2129 - val_accuracy: 0.5858 - val_loss: 1.2454 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.5763 - loss: 1.1965 - val_accuracy: 0.6088 - val_loss: 1.2181 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.5795 - loss: 1.1938 - val_accuracy: 0.6218 - val_loss: 1.1841 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.5847 - loss: 1.1775 - val_accuracy: 0.5988 - val_loss: 1.1922 Epoch 15/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.5880 - loss: 1.1794 - val_accuracy: 0.6134 - val_loss: 1.1736 Epoch 16/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.5908 - loss: 1.1609 - val_accuracy: 0.6168 - val_loss: 1.1839 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.6279 - loss: 1.1887 313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step Classification Report precision recall f1-score support 0 0.67 0.65 0.66 1000 1 0.78 0.74 0.76 1000 2 0.55 0.45 0.49 1000 3 0.46 0.48 0.47 1000 4 0.52 0.63 0.57 1000 5 0.56 0.51 0.54 1000 6 0.64 0.81 0.72 1000 7 0.78 0.61 0.69 1000 8 0.62 0.81 0.70 1000 9 0.83 0.60 0.70 1000 accuracy 0.63 10000 macro avg 0.64 0.63 0.63 10000 weighted avg 0.64 0.63 0.63 10000 Accuracy Score: 0.6298 Root Mean Square Error: 2.5800775182152957
In [ ]:
data_df = pd.DataFrame(data)
data_df
Out[ ]:
| model | accuracy | val_accuracy | test_accuracy | loss | val_loss | test_loss | time | |
|---|---|---|---|---|---|---|---|---|
| 0 | DNN | 0.565 | 0.562 | 0.567 | 1.245 | 1.274 | 1.268 | 46.604 |
| 1 | CNN_DO_MP_DO_16 | 0.634 | 0.610 | 0.619 | 1.056 | 1.110 | 1.108 | 60.615 |
| 2 | CNN_DO_MP_DO_32 | 0.684 | 0.650 | 0.640 | 0.918 | 1.014 | 1.041 | 85.284 |
| 3 | CNN_DO_MP_DO_64 | 0.726 | 0.662 | 0.656 | 0.792 | 0.968 | 1.000 | 82.357 |
| 4 | CNN_DO_MP_DO_128 | 0.722 | 0.642 | 0.650 | 0.803 | 1.019 | 1.041 | 55.198 |
| 5 | CNN_DO_MP_DO_256 | 0.746 | 0.646 | 0.633 | 0.728 | 1.034 | 1.080 | 106.222 |
| 6 | CNN_DO_MP_DO_512 | 0.764 | 0.643 | 0.635 | 0.677 | 1.065 | 1.105 | 200.123 |
| 7 | CNN_DO_MP_DO_32-2.2 | 0.662 | 0.632 | 0.628 | 0.978 | 1.077 | 1.080 | 100.190 |
| 8 | CNN_DO_MP_DO_32-4.4 | 0.675 | 0.636 | 0.641 | 0.938 | 1.054 | 1.053 | 74.688 |
| 9 | CNN_DO_MP_DO_32-5.5 | 0.674 | 0.639 | 0.641 | 0.943 | 1.043 | 1.058 | 68.455 |
| 10 | CNN_DO_MP_DO_64-2.2 | 0.655 | 0.615 | 0.618 | 1.003 | 1.119 | 1.122 | 63.568 |
| 11 | CNN_DO_MP_DO_64-4.4 | 0.691 | 0.621 | 0.615 | 0.894 | 1.081 | 1.113 | 58.112 |
| 12 | CNN_DO_MP_DO_64-5.5 | 0.676 | 0.633 | 0.635 | 0.938 | 1.050 | 1.062 | 45.898 |
| 13 | CNN_DO_MP_DO_128-2.2 | 0.715 | 0.627 | 0.627 | 0.814 | 1.060 | 1.074 | 79.575 |
| 14 | CNN_DO_MP_DO_128-4.4 | 0.746 | 0.637 | 0.636 | 0.724 | 1.079 | 1.110 | 82.708 |
| 15 | CNN_DO_MP_DO_128-5.5 | 0.721 | 0.632 | 0.642 | 0.807 | 1.068 | 1.075 | 73.661 |
| 16 | CNN_DO_MP_DO_CNN_DO_MP_DO16-2.2 | 0.590 | 0.617 | 0.630 | 1.167 | 1.184 | 1.189 | 75.247 |
In [ ]:
# Build Model 2-4: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_CNN_DO_MP_DO_64_64'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 16s 17ms/step - accuracy: 0.3441 - loss: 1.7953 - val_accuracy: 0.5262 - val_loss: 1.4136 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 6ms/step - accuracy: 0.5422 - loss: 1.2989 - val_accuracy: 0.5820 - val_loss: 1.2772 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.5962 - loss: 1.1523 - val_accuracy: 0.6346 - val_loss: 1.1458 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6250 - loss: 1.0767 - val_accuracy: 0.6340 - val_loss: 1.1225 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.6400 - loss: 1.0259 - val_accuracy: 0.6640 - val_loss: 1.0534 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.6578 - loss: 0.9897 - val_accuracy: 0.6716 - val_loss: 1.0109 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6695 - loss: 0.9533 - val_accuracy: 0.6766 - val_loss: 0.9946 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.6747 - loss: 0.9251 - val_accuracy: 0.6616 - val_loss: 1.0050 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.6856 - loss: 0.9081 - val_accuracy: 0.6850 - val_loss: 0.9449 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.6900 - loss: 0.8935 - val_accuracy: 0.6916 - val_loss: 0.9607 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.6978 - loss: 0.8687 - val_accuracy: 0.6966 - val_loss: 0.9219 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.6968 - loss: 0.8719 - val_accuracy: 0.7054 - val_loss: 0.9161 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.6973 - loss: 0.8670 - val_accuracy: 0.7156 - val_loss: 0.8970 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.7065 - loss: 0.8388 - val_accuracy: 0.7154 - val_loss: 0.9180 Epoch 15/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 5ms/step - accuracy: 0.7078 - loss: 0.8429 - val_accuracy: 0.7092 - val_loss: 0.9020 Epoch 16/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.7122 - loss: 0.8271 - val_accuracy: 0.7114 - val_loss: 0.9123 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.7069 - loss: 0.9190 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.76 0.71 0.74 1000 1 0.89 0.77 0.83 1000 2 0.62 0.58 0.60 1000 3 0.57 0.42 0.49 1000 4 0.60 0.70 0.65 1000 5 0.61 0.67 0.64 1000 6 0.63 0.89 0.74 1000 7 0.86 0.66 0.75 1000 8 0.80 0.83 0.81 1000 9 0.78 0.80 0.79 1000 accuracy 0.70 10000 macro avg 0.71 0.70 0.70 10000 weighted avg 0.71 0.70 0.70 10000 Accuracy Score: 0.7042 Root Mean Square Error: 2.2513551474611906
In [ ]:
# Build Model 2-4: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_CNN_DO_MP_DO_64_128'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 13s 12ms/step - accuracy: 0.3676 - loss: 1.7470 - val_accuracy: 0.5384 - val_loss: 1.4043 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.5711 - loss: 1.2219 - val_accuracy: 0.5818 - val_loss: 1.2311 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.6243 - loss: 1.0822 - val_accuracy: 0.6524 - val_loss: 1.0748 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6572 - loss: 0.9890 - val_accuracy: 0.6560 - val_loss: 1.0504 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6798 - loss: 0.9328 - val_accuracy: 0.6824 - val_loss: 0.9780 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6947 - loss: 0.8840 - val_accuracy: 0.6750 - val_loss: 0.9667 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7024 - loss: 0.8646 - val_accuracy: 0.6772 - val_loss: 0.9514 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.7136 - loss: 0.8309 - val_accuracy: 0.7168 - val_loss: 0.8910 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 6ms/step - accuracy: 0.7195 - loss: 0.8069 - val_accuracy: 0.7042 - val_loss: 0.8997 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7272 - loss: 0.7896 - val_accuracy: 0.7186 - val_loss: 0.8802 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.7345 - loss: 0.7686 - val_accuracy: 0.7302 - val_loss: 0.8580 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 6ms/step - accuracy: 0.7353 - loss: 0.7591 - val_accuracy: 0.7226 - val_loss: 0.8681 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.7422 - loss: 0.7402 - val_accuracy: 0.7186 - val_loss: 0.8349 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.7448 - loss: 0.7323 - val_accuracy: 0.7088 - val_loss: 0.8800 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.7122 - loss: 0.8789 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.83 0.64 0.72 1000 1 0.91 0.78 0.84 1000 2 0.66 0.54 0.59 1000 3 0.52 0.55 0.53 1000 4 0.54 0.80 0.65 1000 5 0.71 0.54 0.61 1000 6 0.70 0.86 0.77 1000 7 0.87 0.66 0.75 1000 8 0.75 0.89 0.81 1000 9 0.77 0.84 0.80 1000 accuracy 0.71 10000 macro avg 0.73 0.71 0.71 10000 weighted avg 0.73 0.71 0.71 10000 Accuracy Score: 0.7084 Root Mean Square Error: 2.224792125120907
In [ ]:
# Build Model 2-4: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_CNN_DO_MP_DO_64_256'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 14s 12ms/step - accuracy: 0.3696 - loss: 1.7344 - val_accuracy: 0.5622 - val_loss: 1.3121 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.5910 - loss: 1.1695 - val_accuracy: 0.6002 - val_loss: 1.1910 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6473 - loss: 1.0187 - val_accuracy: 0.6588 - val_loss: 1.0320 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6713 - loss: 0.9528 - val_accuracy: 0.6878 - val_loss: 0.9660 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7001 - loss: 0.8726 - val_accuracy: 0.6802 - val_loss: 0.9566 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7163 - loss: 0.8179 - val_accuracy: 0.7128 - val_loss: 0.9227 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7281 - loss: 0.7816 - val_accuracy: 0.7000 - val_loss: 0.9095 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7400 - loss: 0.7505 - val_accuracy: 0.6974 - val_loss: 0.9004 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7488 - loss: 0.7284 - val_accuracy: 0.7298 - val_loss: 0.8405 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7564 - loss: 0.7050 - val_accuracy: 0.7314 - val_loss: 0.7998 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7663 - loss: 0.6756 - val_accuracy: 0.7372 - val_loss: 0.8064 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7708 - loss: 0.6624 - val_accuracy: 0.7200 - val_loss: 0.8165 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.7743 - loss: 0.6441 - val_accuracy: 0.6894 - val_loss: 0.8961 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7781 - loss: 0.6347 - val_accuracy: 0.7402 - val_loss: 0.7693 Epoch 15/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7820 - loss: 0.6149 - val_accuracy: 0.7298 - val_loss: 0.8037 Epoch 16/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7918 - loss: 0.5996 - val_accuracy: 0.7396 - val_loss: 0.7716 Epoch 17/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7921 - loss: 0.5946 - val_accuracy: 0.7398 - val_loss: 0.7775 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7462 - loss: 0.7807 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step Classification Report precision recall f1-score support 0 0.83 0.72 0.77 1000 1 0.88 0.84 0.86 1000 2 0.71 0.59 0.64 1000 3 0.63 0.49 0.55 1000 4 0.64 0.76 0.69 1000 5 0.64 0.69 0.66 1000 6 0.70 0.87 0.78 1000 7 0.77 0.78 0.78 1000 8 0.83 0.86 0.84 1000 9 0.81 0.83 0.82 1000 accuracy 0.74 10000 macro avg 0.74 0.74 0.74 10000 weighted avg 0.74 0.74 0.74 10000 Accuracy Score: 0.7416 Root Mean Square Error: 2.0865761428713787
In [ ]:
# Build Model 2-4: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_X3_128_256_512'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 27s 26ms/step - accuracy: 0.3281 - loss: 1.8146 - val_accuracy: 0.5386 - val_loss: 1.4395 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 15ms/step - accuracy: 0.5617 - loss: 1.2437 - val_accuracy: 0.6482 - val_loss: 1.1671 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.6206 - loss: 1.0864 - val_accuracy: 0.6746 - val_loss: 1.0786 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.6689 - loss: 0.9509 - val_accuracy: 0.6918 - val_loss: 1.0420 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 15ms/step - accuracy: 0.6868 - loss: 0.8937 - val_accuracy: 0.7172 - val_loss: 0.9329 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 21s 16ms/step - accuracy: 0.7055 - loss: 0.8408 - val_accuracy: 0.7106 - val_loss: 0.9259 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 19s 15ms/step - accuracy: 0.7175 - loss: 0.8006 - val_accuracy: 0.7254 - val_loss: 0.8725 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 20s 14ms/step - accuracy: 0.7325 - loss: 0.7658 - val_accuracy: 0.7394 - val_loss: 0.8195 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7430 - loss: 0.7347 - val_accuracy: 0.7266 - val_loss: 0.8372 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7506 - loss: 0.7092 - val_accuracy: 0.6950 - val_loss: 0.9184 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7537 - loss: 0.7029 - val_accuracy: 0.7212 - val_loss: 0.8335 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.7213 - loss: 0.8511 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step Classification Report precision recall f1-score support 0 0.79 0.79 0.79 1000 1 0.89 0.84 0.86 1000 2 0.58 0.66 0.62 1000 3 0.63 0.43 0.51 1000 4 0.64 0.73 0.68 1000 5 0.77 0.46 0.58 1000 6 0.57 0.91 0.70 1000 7 0.84 0.71 0.77 1000 8 0.74 0.92 0.82 1000 9 0.91 0.73 0.81 1000 accuracy 0.72 10000 macro avg 0.74 0.72 0.71 10000 weighted avg 0.74 0.72 0.71 10000 Accuracy Score: 0.7176 Root Mean Square Error: 2.1007855673533173
In [ ]:
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
Out[ ]:
In [ ]:
data_df = pd.DataFrame(data)
data_df
Out[ ]:
| model | accuracy | val_accuracy | test_accuracy | loss | val_loss | test_loss | time | |
|---|---|---|---|---|---|---|---|---|
| 0 | DNN | 0.565 | 0.562 | 0.567 | 1.245 | 1.274 | 1.268 | 46.604 |
| 1 | CNN_DO_MP_DO_16 | 0.634 | 0.610 | 0.619 | 1.056 | 1.110 | 1.108 | 60.615 |
| 2 | CNN_DO_MP_DO_32 | 0.684 | 0.650 | 0.640 | 0.918 | 1.014 | 1.041 | 85.284 |
| 3 | CNN_DO_MP_DO_64 | 0.726 | 0.662 | 0.656 | 0.792 | 0.968 | 1.000 | 82.357 |
| 4 | CNN_DO_MP_DO_128 | 0.722 | 0.642 | 0.650 | 0.803 | 1.019 | 1.041 | 55.198 |
| 5 | CNN_DO_MP_DO_256 | 0.746 | 0.646 | 0.633 | 0.728 | 1.034 | 1.080 | 106.222 |
| 6 | CNN_DO_MP_DO_512 | 0.764 | 0.643 | 0.635 | 0.677 | 1.065 | 1.105 | 200.123 |
| 7 | CNN_DO_MP_DO_32-2.2 | 0.662 | 0.632 | 0.628 | 0.978 | 1.077 | 1.080 | 100.190 |
| 8 | CNN_DO_MP_DO_32-4.4 | 0.675 | 0.636 | 0.641 | 0.938 | 1.054 | 1.053 | 74.688 |
| 9 | CNN_DO_MP_DO_32-5.5 | 0.674 | 0.639 | 0.641 | 0.943 | 1.043 | 1.058 | 68.455 |
| 10 | CNN_DO_MP_DO_64-2.2 | 0.655 | 0.615 | 0.618 | 1.003 | 1.119 | 1.122 | 63.568 |
| 11 | CNN_DO_MP_DO_64-4.4 | 0.691 | 0.621 | 0.615 | 0.894 | 1.081 | 1.113 | 58.112 |
| 12 | CNN_DO_MP_DO_64-5.5 | 0.676 | 0.633 | 0.635 | 0.938 | 1.050 | 1.062 | 45.898 |
| 13 | CNN_DO_MP_DO_128-2.2 | 0.715 | 0.627 | 0.627 | 0.814 | 1.060 | 1.074 | 79.575 |
| 14 | CNN_DO_MP_DO_128-4.4 | 0.746 | 0.637 | 0.636 | 0.724 | 1.079 | 1.110 | 82.708 |
| 15 | CNN_DO_MP_DO_128-5.5 | 0.721 | 0.632 | 0.642 | 0.807 | 1.068 | 1.075 | 73.661 |
| 16 | CNN_DO_MP_DO_CNN_DO_MP_DO16-2.2 | 0.590 | 0.617 | 0.630 | 1.167 | 1.184 | 1.189 | 75.247 |
| 17 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_64 | 0.703 | 0.688 | 0.690 | 0.858 | 0.939 | 0.940 | 70.261 |
| 18 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_128 | 0.752 | 0.723 | 0.733 | 0.714 | 0.815 | 0.822 | 107.098 |
| 19 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_256 | 0.781 | 0.741 | 0.744 | 0.629 | 0.789 | 0.793 | 111.724 |
| 20 | CNN_DO_MP_DO_CNN_DO_MP_DO_CNN_DO_MP_DO_64_128_256 | 0.732 | 0.726 | 0.730 | 0.772 | 0.813 | 0.822 | 71.486 |
| 21 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_64 | 0.709 | 0.711 | 0.704 | 0.838 | 0.912 | 0.923 | 88.232 |
| 22 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_128 | 0.745 | 0.709 | 0.708 | 0.735 | 0.880 | 0.887 | 76.367 |
| 23 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_256 | 0.788 | 0.740 | 0.742 | 0.609 | 0.777 | 0.787 | 100.630 |
| 24 | CNN_DO_MP_DO_CNN_DO_MP_DO_CNN_DO_MP_DO_64_128_256 | 0.737 | 0.713 | 0.711 | 0.758 | 0.856 | 0.877 | 75.341 |
| 25 | CNN_DO_MP_DO_CNN_DO_MP_DO_CNN_DO_MP_DO_64_128_256 | 0.778 | 0.732 | 0.740 | 0.639 | 0.763 | 0.769 | 136.856 |
| 26 | CNN_DO_MP_DO_X3_128_256_512 | 0.753 | 0.721 | 0.718 | 0.702 | 0.833 | 0.853 | 161.721 |
In [ ]:
# Build Model 2-4: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_X3_64_128_256'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1),activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 23s 19ms/step - accuracy: 0.3181 - loss: 1.8331 - val_accuracy: 0.5126 - val_loss: 1.5171 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.5373 - loss: 1.3018 - val_accuracy: 0.6064 - val_loss: 1.2708 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.5944 - loss: 1.1549 - val_accuracy: 0.6412 - val_loss: 1.1572 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6307 - loss: 1.0467 - val_accuracy: 0.6742 - val_loss: 1.0769 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6560 - loss: 0.9872 - val_accuracy: 0.6806 - val_loss: 1.0088 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6736 - loss: 0.9372 - val_accuracy: 0.7130 - val_loss: 1.0116 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6900 - loss: 0.8882 - val_accuracy: 0.7146 - val_loss: 0.9262 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7019 - loss: 0.8520 - val_accuracy: 0.7016 - val_loss: 0.9460 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7103 - loss: 0.8327 - val_accuracy: 0.7024 - val_loss: 0.9153 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7191 - loss: 0.8067 - val_accuracy: 0.7338 - val_loss: 0.8750 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7269 - loss: 0.7934 - val_accuracy: 0.7320 - val_loss: 0.8843 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7292 - loss: 0.7686 - val_accuracy: 0.7476 - val_loss: 0.8055 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7376 - loss: 0.7503 - val_accuracy: 0.7452 - val_loss: 0.8051 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7367 - loss: 0.7452 - val_accuracy: 0.7534 - val_loss: 0.7829 Epoch 15/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7434 - loss: 0.7379 - val_accuracy: 0.7522 - val_loss: 0.7883 Epoch 16/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7501 - loss: 0.7142 - val_accuracy: 0.7392 - val_loss: 0.7972 Epoch 17/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7535 - loss: 0.7012 - val_accuracy: 0.7614 - val_loss: 0.7688 Epoch 18/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7576 - loss: 0.6982 - val_accuracy: 0.7652 - val_loss: 0.7429 Epoch 19/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7567 - loss: 0.6983 - val_accuracy: 0.7656 - val_loss: 0.7353 Epoch 20/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7638 - loss: 0.6777 - val_accuracy: 0.7506 - val_loss: 0.7515 Epoch 21/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 8ms/step - accuracy: 0.7652 - loss: 0.6736 - val_accuracy: 0.7594 - val_loss: 0.7291 Epoch 22/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 6ms/step - accuracy: 0.7686 - loss: 0.6561 - val_accuracy: 0.7610 - val_loss: 0.7190 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7504 - loss: 0.7494 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.85 0.72 0.78 1000 1 0.92 0.86 0.89 1000 2 0.68 0.60 0.64 1000 3 0.63 0.57 0.60 1000 4 0.60 0.83 0.70 1000 5 0.78 0.58 0.67 1000 6 0.63 0.94 0.76 1000 7 0.89 0.71 0.79 1000 8 0.80 0.89 0.84 1000 9 0.88 0.82 0.85 1000 accuracy 0.75 10000 macro avg 0.77 0.75 0.75 10000 weighted avg 0.77 0.75 0.75 10000 Accuracy Score: 0.7515 Root Mean Square Error: 1.961479033790573
In [ ]:
# Build Model 2-4: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_X3_32_64_128'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1),activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 16s 12ms/step - accuracy: 0.2883 - loss: 1.9172 - val_accuracy: 0.5010 - val_loss: 1.5772 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 5ms/step - accuracy: 0.5022 - loss: 1.3854 - val_accuracy: 0.5662 - val_loss: 1.3735 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.5559 - loss: 1.2513 - val_accuracy: 0.6006 - val_loss: 1.2827 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.5831 - loss: 1.1709 - val_accuracy: 0.6274 - val_loss: 1.1893 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.6091 - loss: 1.1116 - val_accuracy: 0.6524 - val_loss: 1.1418 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.6272 - loss: 1.0584 - val_accuracy: 0.6334 - val_loss: 1.1236 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.6374 - loss: 1.0392 - val_accuracy: 0.6304 - val_loss: 1.1232 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.6517 - loss: 0.9970 - val_accuracy: 0.6884 - val_loss: 1.0392 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.6526 - loss: 0.9814 - val_accuracy: 0.6578 - val_loss: 1.0467 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.6606 - loss: 0.9668 - val_accuracy: 0.6822 - val_loss: 0.9942 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6722 - loss: 0.9359 - val_accuracy: 0.7016 - val_loss: 1.0022 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6783 - loss: 0.9274 - val_accuracy: 0.6920 - val_loss: 0.9720 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.6820 - loss: 0.9149 - val_accuracy: 0.6836 - val_loss: 0.9839 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.6894 - loss: 0.8923 - val_accuracy: 0.7222 - val_loss: 0.9320 Epoch 15/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6839 - loss: 0.8978 - val_accuracy: 0.6848 - val_loss: 0.9624 Epoch 16/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 5ms/step - accuracy: 0.6952 - loss: 0.8758 - val_accuracy: 0.6836 - val_loss: 0.9239 Epoch 17/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.6916 - loss: 0.8813 - val_accuracy: 0.7282 - val_loss: 0.8712 Epoch 18/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 5ms/step - accuracy: 0.6925 - loss: 0.8679 - val_accuracy: 0.7198 - val_loss: 0.8608 Epoch 19/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.7027 - loss: 0.8450 - val_accuracy: 0.6966 - val_loss: 0.9245 Epoch 20/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.7043 - loss: 0.8439 - val_accuracy: 0.7130 - val_loss: 0.8731 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.7179 - loss: 0.8845 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.88 0.65 0.75 1000 1 0.92 0.77 0.84 1000 2 0.66 0.56 0.61 1000 3 0.60 0.49 0.54 1000 4 0.57 0.81 0.67 1000 5 0.70 0.60 0.65 1000 6 0.58 0.91 0.71 1000 7 0.82 0.71 0.76 1000 8 0.80 0.87 0.83 1000 9 0.83 0.82 0.83 1000 accuracy 0.72 10000 macro avg 0.74 0.72 0.72 10000 weighted avg 0.74 0.72 0.72 10000 Accuracy Score: 0.7181 Root Mean Square Error: 2.1209667607013554
In [ ]:
data_df = pd.DataFrame(data)
data_df
Out[ ]:
| model | accuracy | val_accuracy | test_accuracy | loss | val_loss | test_loss | time | |
|---|---|---|---|---|---|---|---|---|
| 0 | DNN | 0.565 | 0.562 | 0.567 | 1.245 | 1.274 | 1.268 | 46.604 |
| 1 | CNN_DO_MP_DO_16 | 0.634 | 0.610 | 0.619 | 1.056 | 1.110 | 1.108 | 60.615 |
| 2 | CNN_DO_MP_DO_32 | 0.684 | 0.650 | 0.640 | 0.918 | 1.014 | 1.041 | 85.284 |
| 3 | CNN_DO_MP_DO_64 | 0.726 | 0.662 | 0.656 | 0.792 | 0.968 | 1.000 | 82.357 |
| 4 | CNN_DO_MP_DO_128 | 0.722 | 0.642 | 0.650 | 0.803 | 1.019 | 1.041 | 55.198 |
| 5 | CNN_DO_MP_DO_256 | 0.746 | 0.646 | 0.633 | 0.728 | 1.034 | 1.080 | 106.222 |
| 6 | CNN_DO_MP_DO_512 | 0.764 | 0.643 | 0.635 | 0.677 | 1.065 | 1.105 | 200.123 |
| 7 | CNN_DO_MP_DO_32-2.2 | 0.662 | 0.632 | 0.628 | 0.978 | 1.077 | 1.080 | 100.190 |
| 8 | CNN_DO_MP_DO_32-4.4 | 0.675 | 0.636 | 0.641 | 0.938 | 1.054 | 1.053 | 74.688 |
| 9 | CNN_DO_MP_DO_32-5.5 | 0.674 | 0.639 | 0.641 | 0.943 | 1.043 | 1.058 | 68.455 |
| 10 | CNN_DO_MP_DO_64-2.2 | 0.655 | 0.615 | 0.618 | 1.003 | 1.119 | 1.122 | 63.568 |
| 11 | CNN_DO_MP_DO_64-4.4 | 0.691 | 0.621 | 0.615 | 0.894 | 1.081 | 1.113 | 58.112 |
| 12 | CNN_DO_MP_DO_64-5.5 | 0.676 | 0.633 | 0.635 | 0.938 | 1.050 | 1.062 | 45.898 |
| 13 | CNN_DO_MP_DO_128-2.2 | 0.715 | 0.627 | 0.627 | 0.814 | 1.060 | 1.074 | 79.575 |
| 14 | CNN_DO_MP_DO_128-4.4 | 0.746 | 0.637 | 0.636 | 0.724 | 1.079 | 1.110 | 82.708 |
| 15 | CNN_DO_MP_DO_128-5.5 | 0.721 | 0.632 | 0.642 | 0.807 | 1.068 | 1.075 | 73.661 |
| 16 | CNN_DO_MP_DO_CNN_DO_MP_DO16-2.2 | 0.590 | 0.617 | 0.630 | 1.167 | 1.184 | 1.189 | 75.247 |
| 17 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_64 | 0.703 | 0.688 | 0.690 | 0.858 | 0.939 | 0.940 | 70.261 |
| 18 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_128 | 0.752 | 0.723 | 0.733 | 0.714 | 0.815 | 0.822 | 107.098 |
| 19 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_256 | 0.781 | 0.741 | 0.744 | 0.629 | 0.789 | 0.793 | 111.724 |
| 20 | CNN_DO_MP_DO_CNN_DO_MP_DO_CNN_DO_MP_DO_64_128_256 | 0.732 | 0.726 | 0.730 | 0.772 | 0.813 | 0.822 | 71.486 |
| 21 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_64 | 0.709 | 0.711 | 0.704 | 0.838 | 0.912 | 0.923 | 88.232 |
| 22 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_128 | 0.745 | 0.709 | 0.708 | 0.735 | 0.880 | 0.887 | 76.367 |
| 23 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_256 | 0.788 | 0.740 | 0.742 | 0.609 | 0.777 | 0.787 | 100.630 |
| 24 | CNN_DO_MP_DO_CNN_DO_MP_DO_CNN_DO_MP_DO_64_128_256 | 0.737 | 0.713 | 0.711 | 0.758 | 0.856 | 0.877 | 75.341 |
| 25 | CNN_DO_MP_DO_CNN_DO_MP_DO_CNN_DO_MP_DO_64_128_256 | 0.778 | 0.732 | 0.740 | 0.639 | 0.763 | 0.769 | 136.856 |
| 26 | CNN_DO_MP_DO_X3_128_256_512 | 0.753 | 0.721 | 0.718 | 0.702 | 0.833 | 0.853 | 161.721 |
| 27 | CNN_DO_MP_DO_X3_64_128_256 | 0.765 | 0.761 | 0.752 | 0.666 | 0.719 | 0.748 | 133.832 |
| 28 | CNN_DO_MP_DO_X3_32_64_128 | 0.703 | 0.713 | 0.718 | 0.847 | 0.873 | 0.884 | 102.764 |
In [ ]:
# Build Model 2-4: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_X3_64_128_256-1'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1),activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 16s 14ms/step - accuracy: 0.3341 - loss: 1.7942 - val_accuracy: 0.5576 - val_loss: 1.4894 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.5479 - loss: 1.2744 - val_accuracy: 0.6188 - val_loss: 1.2519 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6034 - loss: 1.1208 - val_accuracy: 0.6562 - val_loss: 1.1637 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6377 - loss: 1.0297 - val_accuracy: 0.6894 - val_loss: 1.0645 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6699 - loss: 0.9572 - val_accuracy: 0.6980 - val_loss: 1.0102 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6791 - loss: 0.9193 - val_accuracy: 0.6716 - val_loss: 1.0189 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6915 - loss: 0.8792 - val_accuracy: 0.7208 - val_loss: 0.8952 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7026 - loss: 0.8463 - val_accuracy: 0.7174 - val_loss: 0.8994 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7189 - loss: 0.8148 - val_accuracy: 0.7450 - val_loss: 0.8631 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 8ms/step - accuracy: 0.7257 - loss: 0.7869 - val_accuracy: 0.7198 - val_loss: 0.8711 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7326 - loss: 0.7659 - val_accuracy: 0.7226 - val_loss: 0.8606 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7346 - loss: 0.7603 - val_accuracy: 0.7182 - val_loss: 0.8374 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7191 - loss: 0.8612 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.86 0.69 0.77 1000 1 0.95 0.76 0.85 1000 2 0.61 0.62 0.62 1000 3 0.50 0.59 0.54 1000 4 0.67 0.72 0.70 1000 5 0.68 0.56 0.61 1000 6 0.58 0.92 0.71 1000 7 0.92 0.64 0.76 1000 8 0.82 0.86 0.84 1000 9 0.85 0.79 0.82 1000 accuracy 0.72 10000 macro avg 0.74 0.72 0.72 10000 weighted avg 0.74 0.72 0.72 10000 Accuracy Score: 0.7162 Root Mean Square Error: 2.0617953341687434
In [ ]:
# Build Model 2-4: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_X3_64_128_256-2'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1),activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 17s 14ms/step - accuracy: 0.3142 - loss: 1.8428 - val_accuracy: 0.5426 - val_loss: 1.4761 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.5375 - loss: 1.3014 - val_accuracy: 0.5802 - val_loss: 1.3091 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.5954 - loss: 1.1498 - val_accuracy: 0.6390 - val_loss: 1.1598 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6356 - loss: 1.0460 - val_accuracy: 0.6600 - val_loss: 1.1073 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6556 - loss: 0.9871 - val_accuracy: 0.6774 - val_loss: 1.0242 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6724 - loss: 0.9385 - val_accuracy: 0.6832 - val_loss: 1.0019 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6856 - loss: 0.8980 - val_accuracy: 0.7110 - val_loss: 0.9578 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6967 - loss: 0.8664 - val_accuracy: 0.7044 - val_loss: 0.9627 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7087 - loss: 0.8373 - val_accuracy: 0.7280 - val_loss: 0.9038 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7171 - loss: 0.8117 - val_accuracy: 0.7386 - val_loss: 0.8762 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7209 - loss: 0.7994 - val_accuracy: 0.7136 - val_loss: 0.9047 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.7224 - loss: 0.7972 - val_accuracy: 0.7334 - val_loss: 0.8688 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7339 - loss: 0.7641 - val_accuracy: 0.7410 - val_loss: 0.8072 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7356 - loss: 0.7513 - val_accuracy: 0.7400 - val_loss: 0.8029 Epoch 15/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7413 - loss: 0.7500 - val_accuracy: 0.7458 - val_loss: 0.8300 Epoch 16/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7494 - loss: 0.7211 - val_accuracy: 0.7404 - val_loss: 0.7801 Epoch 17/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.7510 - loss: 0.7175 - val_accuracy: 0.7012 - val_loss: 0.8612 Epoch 18/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7519 - loss: 0.7092 - val_accuracy: 0.7446 - val_loss: 0.7759 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7418 - loss: 0.7846 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.83 0.73 0.78 1000 1 0.93 0.81 0.87 1000 2 0.69 0.58 0.63 1000 3 0.64 0.46 0.54 1000 4 0.55 0.86 0.67 1000 5 0.74 0.60 0.66 1000 6 0.70 0.89 0.78 1000 7 0.86 0.74 0.80 1000 8 0.73 0.93 0.82 1000 9 0.89 0.78 0.83 1000 accuracy 0.74 10000 macro avg 0.75 0.74 0.74 10000 weighted avg 0.75 0.74 0.74 10000 Accuracy Score: 0.739 Root Mean Square Error: 2.018316129846858
In [ ]:
# Build Model 2-4: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_X3_64_128_256-DNN'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1),activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Dense(units=128, activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 27s 18ms/step - accuracy: 0.2668 - loss: 1.9471 - val_accuracy: 0.4806 - val_loss: 1.5537 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 20s 7ms/step - accuracy: 0.5027 - loss: 1.3920 - val_accuracy: 0.5710 - val_loss: 1.3219 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.5660 - loss: 1.2126 - val_accuracy: 0.6152 - val_loss: 1.1970 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.5979 - loss: 1.1351 - val_accuracy: 0.5774 - val_loss: 1.2030 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6311 - loss: 1.0413 - val_accuracy: 0.5824 - val_loss: 1.1998 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6499 - loss: 0.9863 - val_accuracy: 0.6576 - val_loss: 0.9886 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6707 - loss: 0.9379 - val_accuracy: 0.6982 - val_loss: 0.9347 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6821 - loss: 0.9041 - val_accuracy: 0.7122 - val_loss: 0.8631 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.6890 - loss: 0.8909 - val_accuracy: 0.6118 - val_loss: 1.0908 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.7022 - loss: 0.8497 - val_accuracy: 0.7148 - val_loss: 0.8302 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7069 - loss: 0.8364 - val_accuracy: 0.7014 - val_loss: 0.8893 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7076 - loss: 0.8298 - val_accuracy: 0.7058 - val_loss: 0.8393 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7174 - loss: 0.8061 - val_accuracy: 0.7034 - val_loss: 0.8497 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.7086 - loss: 0.8630 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.88 0.58 0.70 1000 1 0.91 0.84 0.88 1000 2 0.66 0.53 0.59 1000 3 0.61 0.43 0.51 1000 4 0.52 0.81 0.63 1000 5 0.72 0.54 0.61 1000 6 0.59 0.91 0.71 1000 7 0.81 0.71 0.76 1000 8 0.70 0.92 0.79 1000 9 0.90 0.77 0.83 1000 accuracy 0.70 10000 macro avg 0.73 0.70 0.70 10000 weighted avg 0.73 0.70 0.70 10000 Accuracy Score: 0.7034 Root Mean Square Error: 2.2119448456053328
In [ ]:
data_df = pd.DataFrame(data)
data_df
Out[ ]:
| model | accuracy | val_accuracy | test_accuracy | loss | val_loss | test_loss | time | |
|---|---|---|---|---|---|---|---|---|
| 0 | DNN | 0.565 | 0.562 | 0.567 | 1.245 | 1.274 | 1.268 | 46.604 |
| 1 | CNN_DO_MP_DO_16 | 0.634 | 0.610 | 0.619 | 1.056 | 1.110 | 1.108 | 60.615 |
| 2 | CNN_DO_MP_DO_32 | 0.684 | 0.650 | 0.640 | 0.918 | 1.014 | 1.041 | 85.284 |
| 3 | CNN_DO_MP_DO_64 | 0.726 | 0.662 | 0.656 | 0.792 | 0.968 | 1.000 | 82.357 |
| 4 | CNN_DO_MP_DO_128 | 0.722 | 0.642 | 0.650 | 0.803 | 1.019 | 1.041 | 55.198 |
| 5 | CNN_DO_MP_DO_256 | 0.746 | 0.646 | 0.633 | 0.728 | 1.034 | 1.080 | 106.222 |
| 6 | CNN_DO_MP_DO_512 | 0.764 | 0.643 | 0.635 | 0.677 | 1.065 | 1.105 | 200.123 |
| 7 | CNN_DO_MP_DO_32-2.2 | 0.662 | 0.632 | 0.628 | 0.978 | 1.077 | 1.080 | 100.190 |
| 8 | CNN_DO_MP_DO_32-4.4 | 0.675 | 0.636 | 0.641 | 0.938 | 1.054 | 1.053 | 74.688 |
| 9 | CNN_DO_MP_DO_32-5.5 | 0.674 | 0.639 | 0.641 | 0.943 | 1.043 | 1.058 | 68.455 |
| 10 | CNN_DO_MP_DO_64-2.2 | 0.655 | 0.615 | 0.618 | 1.003 | 1.119 | 1.122 | 63.568 |
| 11 | CNN_DO_MP_DO_64-4.4 | 0.691 | 0.621 | 0.615 | 0.894 | 1.081 | 1.113 | 58.112 |
| 12 | CNN_DO_MP_DO_64-5.5 | 0.676 | 0.633 | 0.635 | 0.938 | 1.050 | 1.062 | 45.898 |
| 13 | CNN_DO_MP_DO_128-2.2 | 0.715 | 0.627 | 0.627 | 0.814 | 1.060 | 1.074 | 79.575 |
| 14 | CNN_DO_MP_DO_128-4.4 | 0.746 | 0.637 | 0.636 | 0.724 | 1.079 | 1.110 | 82.708 |
| 15 | CNN_DO_MP_DO_128-5.5 | 0.721 | 0.632 | 0.642 | 0.807 | 1.068 | 1.075 | 73.661 |
| 16 | CNN_DO_MP_DO_CNN_DO_MP_DO16-2.2 | 0.590 | 0.617 | 0.630 | 1.167 | 1.184 | 1.189 | 75.247 |
| 17 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_64 | 0.703 | 0.688 | 0.690 | 0.858 | 0.939 | 0.940 | 70.261 |
| 18 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_128 | 0.752 | 0.723 | 0.733 | 0.714 | 0.815 | 0.822 | 107.098 |
| 19 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_256 | 0.781 | 0.741 | 0.744 | 0.629 | 0.789 | 0.793 | 111.724 |
| 20 | CNN_DO_MP_DO_CNN_DO_MP_DO_CNN_DO_MP_DO_64_128_256 | 0.732 | 0.726 | 0.730 | 0.772 | 0.813 | 0.822 | 71.486 |
| 21 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_64 | 0.709 | 0.711 | 0.704 | 0.838 | 0.912 | 0.923 | 88.232 |
| 22 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_128 | 0.745 | 0.709 | 0.708 | 0.735 | 0.880 | 0.887 | 76.367 |
| 23 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_256 | 0.788 | 0.740 | 0.742 | 0.609 | 0.777 | 0.787 | 100.630 |
| 24 | CNN_DO_MP_DO_CNN_DO_MP_DO_CNN_DO_MP_DO_64_128_256 | 0.737 | 0.713 | 0.711 | 0.758 | 0.856 | 0.877 | 75.341 |
| 25 | CNN_DO_MP_DO_CNN_DO_MP_DO_CNN_DO_MP_DO_64_128_256 | 0.778 | 0.732 | 0.740 | 0.639 | 0.763 | 0.769 | 136.856 |
| 26 | CNN_DO_MP_DO_X3_128_256_512 | 0.753 | 0.721 | 0.718 | 0.702 | 0.833 | 0.853 | 161.721 |
| 27 | CNN_DO_MP_DO_X3_64_128_256 | 0.765 | 0.761 | 0.752 | 0.666 | 0.719 | 0.748 | 133.832 |
| 28 | CNN_DO_MP_DO_X3_32_64_128 | 0.703 | 0.713 | 0.718 | 0.847 | 0.873 | 0.884 | 102.764 |
| 29 | CNN_DO_MP_DO_X3_64_128_256-1 | 0.735 | 0.718 | 0.716 | 0.759 | 0.837 | 0.857 | 72.193 |
| 30 | CNN_DO_MP_DO_X3_64_128_256-2 | 0.750 | 0.745 | 0.739 | 0.713 | 0.776 | 0.789 | 101.459 |
| 31 | CNN_DO_MP_DO_X3_64_128_256-DNN | 0.716 | 0.703 | 0.703 | 0.811 | 0.850 | 0.866 | 114.463 |
In [ ]:
for i in range(3):
# Build Model 2-4: CNN and MaxPool with dropouts between and after
name = f'CNN_DO_MP_DO_X3_64_128_256-Pat-3-{i}'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1),activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 32s 24ms/step - accuracy: 0.3265 - loss: 1.8252 - val_accuracy: 0.5456 - val_loss: 1.4853 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 18s 7ms/step - accuracy: 0.5434 - loss: 1.2788 - val_accuracy: 0.6184 - val_loss: 1.2192 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6099 - loss: 1.1130 - val_accuracy: 0.6550 - val_loss: 1.1546 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6432 - loss: 1.0176 - val_accuracy: 0.6656 - val_loss: 1.0567 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6689 - loss: 0.9475 - val_accuracy: 0.6882 - val_loss: 1.0203 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6809 - loss: 0.9108 - val_accuracy: 0.6886 - val_loss: 0.9760 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6942 - loss: 0.8733 - val_accuracy: 0.6992 - val_loss: 0.9488 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7109 - loss: 0.8291 - val_accuracy: 0.7272 - val_loss: 0.8831 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7180 - loss: 0.8169 - val_accuracy: 0.7086 - val_loss: 0.8900 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7238 - loss: 0.7892 - val_accuracy: 0.7234 - val_loss: 0.8617 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7297 - loss: 0.7720 - val_accuracy: 0.7202 - val_loss: 0.8336 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7357 - loss: 0.8347 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.81 0.76 0.78 1000 1 0.88 0.87 0.88 1000 2 0.72 0.49 0.59 1000 3 0.57 0.54 0.56 1000 4 0.60 0.81 0.69 1000 5 0.72 0.58 0.65 1000 6 0.60 0.91 0.72 1000 7 0.84 0.74 0.78 1000 8 0.81 0.89 0.85 1000 9 0.92 0.72 0.81 1000 accuracy 0.73 10000 macro avg 0.75 0.73 0.73 10000 weighted avg 0.75 0.73 0.73 10000 Accuracy Score: 0.7313 Root Mean Square Error: 1.993589727100338
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 18s 15ms/step - accuracy: 0.3190 - loss: 1.8456 - val_accuracy: 0.5394 - val_loss: 1.4768 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.5372 - loss: 1.2956 - val_accuracy: 0.6254 - val_loss: 1.2632 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6077 - loss: 1.1196 - val_accuracy: 0.6536 - val_loss: 1.1471 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6416 - loss: 1.0268 - val_accuracy: 0.6716 - val_loss: 1.0740 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6618 - loss: 0.9644 - val_accuracy: 0.6836 - val_loss: 1.0278 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6792 - loss: 0.9152 - val_accuracy: 0.6728 - val_loss: 1.0023 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6925 - loss: 0.8783 - val_accuracy: 0.6920 - val_loss: 0.9195 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7040 - loss: 0.8503 - val_accuracy: 0.7270 - val_loss: 0.8619 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7137 - loss: 0.8241 - val_accuracy: 0.7028 - val_loss: 0.9029 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7167 - loss: 0.7998 - val_accuracy: 0.7160 - val_loss: 0.8757 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7254 - loss: 0.7862 - val_accuracy: 0.7144 - val_loss: 0.8716 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7099 - loss: 0.8803 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.82 0.76 0.78 1000 1 0.92 0.82 0.87 1000 2 0.70 0.51 0.59 1000 3 0.55 0.51 0.53 1000 4 0.59 0.74 0.65 1000 5 0.83 0.42 0.55 1000 6 0.52 0.95 0.67 1000 7 0.86 0.69 0.77 1000 8 0.76 0.91 0.83 1000 9 0.84 0.82 0.83 1000 accuracy 0.71 10000 macro avg 0.74 0.71 0.71 10000 weighted avg 0.74 0.71 0.71 10000 Accuracy Score: 0.7106 Root Mean Square Error: 2.116719159454083
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 17s 14ms/step - accuracy: 0.3236 - loss: 1.8244 - val_accuracy: 0.5452 - val_loss: 1.4646 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.5489 - loss: 1.2717 - val_accuracy: 0.5994 - val_loss: 1.3000 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6084 - loss: 1.1187 - val_accuracy: 0.6270 - val_loss: 1.1636 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6391 - loss: 1.0263 - val_accuracy: 0.6900 - val_loss: 1.0586 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6671 - loss: 0.9550 - val_accuracy: 0.6938 - val_loss: 1.0261 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6851 - loss: 0.9013 - val_accuracy: 0.7058 - val_loss: 0.9347 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6960 - loss: 0.8743 - val_accuracy: 0.7200 - val_loss: 0.8955 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7087 - loss: 0.8344 - val_accuracy: 0.7352 - val_loss: 0.8849 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7177 - loss: 0.8099 - val_accuracy: 0.7322 - val_loss: 0.8682 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7214 - loss: 0.7940 - val_accuracy: 0.7268 - val_loss: 0.8537 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7349 - loss: 0.7623 - val_accuracy: 0.7384 - val_loss: 0.8457 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7404 - loss: 0.7435 - val_accuracy: 0.7070 - val_loss: 0.8796 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7428 - loss: 0.7270 - val_accuracy: 0.7584 - val_loss: 0.8012 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7480 - loss: 0.7229 - val_accuracy: 0.7520 - val_loss: 0.7785 Epoch 15/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7560 - loss: 0.7026 - val_accuracy: 0.7696 - val_loss: 0.7566 Epoch 16/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.7530 - loss: 0.7066 - val_accuracy: 0.7642 - val_loss: 0.7326 Epoch 17/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7575 - loss: 0.6916 - val_accuracy: 0.7558 - val_loss: 0.7510 Epoch 18/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7592 - loss: 0.6799 - val_accuracy: 0.7586 - val_loss: 0.7659 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7510 - loss: 0.7846 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.82 0.78 0.80 1000 1 0.92 0.84 0.88 1000 2 0.67 0.63 0.65 1000 3 0.60 0.57 0.58 1000 4 0.61 0.80 0.69 1000 5 0.74 0.60 0.66 1000 6 0.67 0.88 0.76 1000 7 0.89 0.71 0.79 1000 8 0.83 0.87 0.85 1000 9 0.87 0.81 0.84 1000 accuracy 0.75 10000 macro avg 0.76 0.75 0.75 10000 weighted avg 0.76 0.75 0.75 10000 Accuracy Score: 0.7486 Root Mean Square Error: 1.9880140844571499
In [ ]:
for i in range(3):
# Build Model 2-4: CNN and MaxPool with dropouts between and after
name = f'CNN_DO_MP_DO_X3_64_128_256-Pat-2-{i}'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1),activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=2)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 17s 15ms/step - accuracy: 0.3383 - loss: 1.8005 - val_accuracy: 0.5728 - val_loss: 1.4031 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.5574 - loss: 1.2574 - val_accuracy: 0.6096 - val_loss: 1.2575 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6089 - loss: 1.1148 - val_accuracy: 0.6592 - val_loss: 1.1421 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6471 - loss: 1.0145 - val_accuracy: 0.7006 - val_loss: 1.0201 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6691 - loss: 0.9474 - val_accuracy: 0.6498 - val_loss: 1.0551 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6902 - loss: 0.8934 - val_accuracy: 0.7154 - val_loss: 0.9913 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6994 - loss: 0.8586 - val_accuracy: 0.7054 - val_loss: 0.9069 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7091 - loss: 0.8306 - val_accuracy: 0.7164 - val_loss: 0.8988 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7182 - loss: 0.8039 - val_accuracy: 0.7364 - val_loss: 0.8526 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7270 - loss: 0.7837 - val_accuracy: 0.7336 - val_loss: 0.8486 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7356 - loss: 0.7611 - val_accuracy: 0.7374 - val_loss: 0.8222 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7370 - loss: 0.7544 - val_accuracy: 0.7442 - val_loss: 0.8146 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.7444 - loss: 0.7300 - val_accuracy: 0.7414 - val_loss: 0.8067 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7486 - loss: 0.7205 - val_accuracy: 0.7406 - val_loss: 0.7955 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.7400 - loss: 0.8178 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.81 0.78 0.79 1000 1 0.93 0.77 0.84 1000 2 0.77 0.52 0.62 1000 3 0.61 0.52 0.56 1000 4 0.55 0.83 0.66 1000 5 0.67 0.65 0.66 1000 6 0.63 0.91 0.74 1000 7 0.87 0.68 0.77 1000 8 0.80 0.89 0.84 1000 9 0.88 0.78 0.83 1000 accuracy 0.73 10000 macro avg 0.75 0.73 0.73 10000 weighted avg 0.75 0.73 0.73 10000 Accuracy Score: 0.7324 Root Mean Square Error: 2.0436976292984244
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 16s 15ms/step - accuracy: 0.3329 - loss: 1.8143 - val_accuracy: 0.5280 - val_loss: 1.4747 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.5409 - loss: 1.2889 - val_accuracy: 0.5850 - val_loss: 1.3343 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6025 - loss: 1.1322 - val_accuracy: 0.6304 - val_loss: 1.2018 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6362 - loss: 1.0440 - val_accuracy: 0.6784 - val_loss: 1.0791 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6541 - loss: 0.9845 - val_accuracy: 0.7004 - val_loss: 1.0153 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6817 - loss: 0.9118 - val_accuracy: 0.7106 - val_loss: 0.9834 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6957 - loss: 0.8760 - val_accuracy: 0.7154 - val_loss: 0.9237 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7088 - loss: 0.8401 - val_accuracy: 0.7252 - val_loss: 0.8850 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7126 - loss: 0.8141 - val_accuracy: 0.7326 - val_loss: 0.8660 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7234 - loss: 0.7947 - val_accuracy: 0.7316 - val_loss: 0.8715 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7262 - loss: 0.7811 - val_accuracy: 0.7330 - val_loss: 0.8350 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7324 - loss: 0.7717 - val_accuracy: 0.7324 - val_loss: 0.8270 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7430 - loss: 0.7404 - val_accuracy: 0.7348 - val_loss: 0.8211 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7508 - loss: 0.7141 - val_accuracy: 0.7464 - val_loss: 0.7990 Epoch 15/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7526 - loss: 0.7140 - val_accuracy: 0.7400 - val_loss: 0.8199 Epoch 16/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.7544 - loss: 0.7087 - val_accuracy: 0.7410 - val_loss: 0.7850 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.7315 - loss: 0.8067 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.84 0.76 0.79 1000 1 0.91 0.86 0.89 1000 2 0.72 0.54 0.62 1000 3 0.56 0.55 0.56 1000 4 0.58 0.81 0.68 1000 5 0.76 0.54 0.63 1000 6 0.58 0.92 0.71 1000 7 0.87 0.71 0.78 1000 8 0.83 0.87 0.85 1000 9 0.89 0.79 0.84 1000 accuracy 0.73 10000 macro avg 0.76 0.73 0.73 10000 weighted avg 0.76 0.73 0.73 10000 Accuracy Score: 0.7349 Root Mean Square Error: 1.9666723163760658
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 17s 14ms/step - accuracy: 0.3263 - loss: 1.8125 - val_accuracy: 0.5112 - val_loss: 1.4722 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.5433 - loss: 1.2738 - val_accuracy: 0.6184 - val_loss: 1.2663 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6037 - loss: 1.1140 - val_accuracy: 0.6486 - val_loss: 1.1542 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6421 - loss: 1.0190 - val_accuracy: 0.6600 - val_loss: 1.0827 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6639 - loss: 0.9693 - val_accuracy: 0.6782 - val_loss: 1.0224 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6881 - loss: 0.8982 - val_accuracy: 0.7050 - val_loss: 0.9507 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6980 - loss: 0.8725 - val_accuracy: 0.7172 - val_loss: 0.9161 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7061 - loss: 0.8456 - val_accuracy: 0.6622 - val_loss: 0.9808 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7184 - loss: 0.8028 - val_accuracy: 0.7026 - val_loss: 0.8990 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.6998 - loss: 0.9190 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step Classification Report precision recall f1-score support 0 0.84 0.69 0.76 1000 1 0.92 0.81 0.86 1000 2 0.65 0.54 0.59 1000 3 0.57 0.47 0.51 1000 4 0.55 0.76 0.64 1000 5 0.66 0.57 0.61 1000 6 0.55 0.94 0.69 1000 7 0.90 0.60 0.72 1000 8 0.77 0.87 0.82 1000 9 0.89 0.76 0.82 1000 accuracy 0.70 10000 macro avg 0.73 0.70 0.70 10000 weighted avg 0.73 0.70 0.70 10000 Accuracy Score: 0.702 Root Mean Square Error: 2.0995237555217137
In [ ]:
for i in range(3):
# Build Model 2-4: CNN and MaxPool with dropouts between and after
name = f'CNN_DO_MP_DO_X3_64_128_256-Pat-1-{i}'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1),activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=1)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 15s 13ms/step - accuracy: 0.3270 - loss: 1.8141 - val_accuracy: 0.5348 - val_loss: 1.4791 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.5310 - loss: 1.3056 - val_accuracy: 0.6092 - val_loss: 1.2793 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.5975 - loss: 1.1469 - val_accuracy: 0.6370 - val_loss: 1.1749 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6362 - loss: 1.0363 - val_accuracy: 0.6780 - val_loss: 1.0594 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6608 - loss: 0.9642 - val_accuracy: 0.6888 - val_loss: 1.0420 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6752 - loss: 0.9287 - val_accuracy: 0.6878 - val_loss: 0.9944 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.6918 - loss: 1.0046 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.78 0.74 0.76 1000 1 0.87 0.81 0.84 1000 2 0.62 0.55 0.58 1000 3 0.46 0.58 0.51 1000 4 0.52 0.78 0.63 1000 5 0.56 0.65 0.60 1000 6 0.81 0.68 0.73 1000 7 0.86 0.60 0.70 1000 8 0.79 0.83 0.81 1000 9 0.88 0.68 0.77 1000 accuracy 0.69 10000 macro avg 0.72 0.69 0.69 10000 weighted avg 0.72 0.69 0.69 10000 Accuracy Score: 0.6881 Root Mean Square Error: 2.1743734729802053
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 16s 14ms/step - accuracy: 0.3216 - loss: 1.8306 - val_accuracy: 0.4618 - val_loss: 1.6185 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 6ms/step - accuracy: 0.5421 - loss: 1.2891 - val_accuracy: 0.6244 - val_loss: 1.2552 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6093 - loss: 1.1033 - val_accuracy: 0.6464 - val_loss: 1.1279 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6449 - loss: 1.0141 - val_accuracy: 0.6866 - val_loss: 1.0180 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6685 - loss: 0.9418 - val_accuracy: 0.6818 - val_loss: 0.9982 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.6916 - loss: 1.0109 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.83 0.58 0.68 1000 1 0.95 0.71 0.81 1000 2 0.63 0.51 0.56 1000 3 0.47 0.67 0.55 1000 4 0.54 0.77 0.63 1000 5 0.66 0.55 0.60 1000 6 0.80 0.72 0.76 1000 7 0.81 0.66 0.73 1000 8 0.64 0.93 0.76 1000 9 0.84 0.75 0.79 1000 accuracy 0.69 10000 macro avg 0.72 0.69 0.69 10000 weighted avg 0.72 0.69 0.69 10000 Accuracy Score: 0.6857 Root Mean Square Error: 2.305775357661713
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 16s 14ms/step - accuracy: 0.3284 - loss: 1.8136 - val_accuracy: 0.5510 - val_loss: 1.4374 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 7ms/step - accuracy: 0.5519 - loss: 1.2567 - val_accuracy: 0.6390 - val_loss: 1.2332 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 8ms/step - accuracy: 0.6172 - loss: 1.0958 - val_accuracy: 0.6776 - val_loss: 1.1067 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 9ms/step - accuracy: 0.6507 - loss: 1.0020 - val_accuracy: 0.6996 - val_loss: 1.0651 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 9ms/step - accuracy: 0.6670 - loss: 0.9535 - val_accuracy: 0.6980 - val_loss: 1.0101 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 6ms/step - accuracy: 0.6986 - loss: 1.0182 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step Classification Report precision recall f1-score support 0 0.81 0.71 0.76 1000 1 0.89 0.82 0.85 1000 2 0.71 0.42 0.53 1000 3 0.49 0.52 0.50 1000 4 0.68 0.57 0.62 1000 5 0.52 0.73 0.61 1000 6 0.60 0.90 0.72 1000 7 0.78 0.71 0.74 1000 8 0.82 0.83 0.83 1000 9 0.86 0.76 0.81 1000 accuracy 0.70 10000 macro avg 0.72 0.70 0.70 10000 weighted avg 0.72 0.70 0.70 10000 Accuracy Score: 0.6978 Root Mean Square Error: 2.129248693788492
In [ ]:
data_df = pd.DataFrame(data)
data_df
Out[ ]:
| model | accuracy | val_accuracy | test_accuracy | loss | val_loss | test_loss | time | |
|---|---|---|---|---|---|---|---|---|
| 0 | DNN | 0.565 | 0.562 | 0.567 | 1.245 | 1.274 | 1.268 | 46.604 |
| 1 | CNN_DO_MP_DO_16 | 0.634 | 0.610 | 0.619 | 1.056 | 1.110 | 1.108 | 60.615 |
| 2 | CNN_DO_MP_DO_32 | 0.684 | 0.650 | 0.640 | 0.918 | 1.014 | 1.041 | 85.284 |
| 3 | CNN_DO_MP_DO_64 | 0.726 | 0.662 | 0.656 | 0.792 | 0.968 | 1.000 | 82.357 |
| 4 | CNN_DO_MP_DO_128 | 0.722 | 0.642 | 0.650 | 0.803 | 1.019 | 1.041 | 55.198 |
| 5 | CNN_DO_MP_DO_256 | 0.746 | 0.646 | 0.633 | 0.728 | 1.034 | 1.080 | 106.222 |
| 6 | CNN_DO_MP_DO_512 | 0.764 | 0.643 | 0.635 | 0.677 | 1.065 | 1.105 | 200.123 |
| 7 | CNN_DO_MP_DO_32-2.2 | 0.662 | 0.632 | 0.628 | 0.978 | 1.077 | 1.080 | 100.190 |
| 8 | CNN_DO_MP_DO_32-4.4 | 0.675 | 0.636 | 0.641 | 0.938 | 1.054 | 1.053 | 74.688 |
| 9 | CNN_DO_MP_DO_32-5.5 | 0.674 | 0.639 | 0.641 | 0.943 | 1.043 | 1.058 | 68.455 |
| 10 | CNN_DO_MP_DO_64-2.2 | 0.655 | 0.615 | 0.618 | 1.003 | 1.119 | 1.122 | 63.568 |
| 11 | CNN_DO_MP_DO_64-4.4 | 0.691 | 0.621 | 0.615 | 0.894 | 1.081 | 1.113 | 58.112 |
| 12 | CNN_DO_MP_DO_64-5.5 | 0.676 | 0.633 | 0.635 | 0.938 | 1.050 | 1.062 | 45.898 |
| 13 | CNN_DO_MP_DO_128-2.2 | 0.715 | 0.627 | 0.627 | 0.814 | 1.060 | 1.074 | 79.575 |
| 14 | CNN_DO_MP_DO_128-4.4 | 0.746 | 0.637 | 0.636 | 0.724 | 1.079 | 1.110 | 82.708 |
| 15 | CNN_DO_MP_DO_128-5.5 | 0.721 | 0.632 | 0.642 | 0.807 | 1.068 | 1.075 | 73.661 |
| 16 | CNN_DO_MP_DO_CNN_DO_MP_DO16-2.2 | 0.590 | 0.617 | 0.630 | 1.167 | 1.184 | 1.189 | 75.247 |
| 17 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_64 | 0.703 | 0.688 | 0.690 | 0.858 | 0.939 | 0.940 | 70.261 |
| 18 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_128 | 0.752 | 0.723 | 0.733 | 0.714 | 0.815 | 0.822 | 107.098 |
| 19 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_256 | 0.781 | 0.741 | 0.744 | 0.629 | 0.789 | 0.793 | 111.724 |
| 20 | CNN_DO_MP_DO_CNN_DO_MP_DO_CNN_DO_MP_DO_64_128_256 | 0.732 | 0.726 | 0.730 | 0.772 | 0.813 | 0.822 | 71.486 |
| 21 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_64 | 0.709 | 0.711 | 0.704 | 0.838 | 0.912 | 0.923 | 88.232 |
| 22 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_128 | 0.745 | 0.709 | 0.708 | 0.735 | 0.880 | 0.887 | 76.367 |
| 23 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_256 | 0.788 | 0.740 | 0.742 | 0.609 | 0.777 | 0.787 | 100.630 |
| 24 | CNN_DO_MP_DO_CNN_DO_MP_DO_CNN_DO_MP_DO_64_128_256 | 0.737 | 0.713 | 0.711 | 0.758 | 0.856 | 0.877 | 75.341 |
| 25 | CNN_DO_MP_DO_CNN_DO_MP_DO_CNN_DO_MP_DO_64_128_256 | 0.778 | 0.732 | 0.740 | 0.639 | 0.763 | 0.769 | 136.856 |
| 26 | CNN_DO_MP_DO_X3_128_256_512 | 0.753 | 0.721 | 0.718 | 0.702 | 0.833 | 0.853 | 161.721 |
| 27 | CNN_DO_MP_DO_X3_64_128_256 | 0.765 | 0.761 | 0.752 | 0.666 | 0.719 | 0.748 | 133.832 |
| 28 | CNN_DO_MP_DO_X3_32_64_128 | 0.703 | 0.713 | 0.718 | 0.847 | 0.873 | 0.884 | 102.764 |
| 29 | CNN_DO_MP_DO_X3_64_128_256-1 | 0.735 | 0.718 | 0.716 | 0.759 | 0.837 | 0.857 | 72.193 |
| 30 | CNN_DO_MP_DO_X3_64_128_256-2 | 0.750 | 0.745 | 0.739 | 0.713 | 0.776 | 0.789 | 101.459 |
| 31 | CNN_DO_MP_DO_X3_64_128_256-DNN | 0.716 | 0.703 | 0.703 | 0.811 | 0.850 | 0.866 | 114.463 |
| 32 | CNN_DO_MP_DO_X3_64_128_256-Pat-3-0 | 0.731 | 0.720 | 0.731 | 0.773 | 0.834 | 0.840 | 104.831 |
| 33 | CNN_DO_MP_DO_X3_64_128_256-Pat-3-1 | 0.724 | 0.714 | 0.711 | 0.789 | 0.872 | 0.883 | 68.565 |
| 34 | CNN_DO_MP_DO_X3_64_128_256-Pat-3-2 | 0.761 | 0.759 | 0.749 | 0.683 | 0.766 | 0.788 | 107.973 |
| 35 | CNN_DO_MP_DO_X3_64_128_256-Pat-2-0 | 0.748 | 0.741 | 0.732 | 0.724 | 0.795 | 0.819 | 87.255 |
| 36 | CNN_DO_MP_DO_X3_64_128_256-Pat-2-1 | 0.750 | 0.741 | 0.735 | 0.716 | 0.785 | 0.802 | 91.753 |
| 37 | CNN_DO_MP_DO_X3_64_128_256-Pat-2-2 | 0.719 | 0.703 | 0.702 | 0.806 | 0.899 | 0.914 | 62.646 |
| 38 | CNN_DO_MP_DO_X3_64_128_256-Pat-1-0 | 0.676 | 0.688 | 0.688 | 0.929 | 0.994 | 1.005 | 41.329 |
| 39 | CNN_DO_MP_DO_X3_64_128_256-Pat-1-1 | 0.673 | 0.682 | 0.686 | 0.933 | 0.998 | 1.012 | 42.944 |
| 40 | CNN_DO_MP_DO_X3_64_128_256-Pat-1-2 | 0.674 | 0.698 | 0.698 | 0.938 | 1.010 | 1.021 | 50.324 |
In [ ]:
for i in range(3):
# Build Model 2-4: CNN and MaxPool with dropouts between and after
name = f'CNN_DO_MP_DO_X3_64_128_256-Pat-4-{i}'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1),activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=4)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 19s 14ms/step - accuracy: 0.3183 - loss: 1.8414 - val_accuracy: 0.5372 - val_loss: 1.4382 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.5495 - loss: 1.2722 - val_accuracy: 0.5974 - val_loss: 1.3085 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.5995 - loss: 1.1256 - val_accuracy: 0.6524 - val_loss: 1.1627 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6412 - loss: 1.0276 - val_accuracy: 0.6690 - val_loss: 1.0488 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6628 - loss: 0.9685 - val_accuracy: 0.6956 - val_loss: 0.9989 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6722 - loss: 0.9255 - val_accuracy: 0.7068 - val_loss: 0.9435 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6906 - loss: 0.8782 - val_accuracy: 0.7106 - val_loss: 0.9220 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7004 - loss: 0.8510 - val_accuracy: 0.7204 - val_loss: 0.8804 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7132 - loss: 0.8254 - val_accuracy: 0.7268 - val_loss: 0.8551 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7144 - loss: 0.8193 - val_accuracy: 0.6730 - val_loss: 0.9627 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7215 - loss: 0.7977 - val_accuracy: 0.7040 - val_loss: 0.8969 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7291 - loss: 0.7782 - val_accuracy: 0.7426 - val_loss: 0.8388 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7319 - loss: 0.7663 - val_accuracy: 0.7282 - val_loss: 0.8185 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7378 - loss: 0.7437 - val_accuracy: 0.7440 - val_loss: 0.8192 Epoch 15/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7456 - loss: 0.7267 - val_accuracy: 0.7356 - val_loss: 0.8055 Epoch 16/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7526 - loss: 0.7079 - val_accuracy: 0.7672 - val_loss: 0.7549 Epoch 17/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7527 - loss: 0.7064 - val_accuracy: 0.7510 - val_loss: 0.7658 Epoch 18/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7558 - loss: 0.6973 - val_accuracy: 0.7178 - val_loss: 0.8345 Epoch 19/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7615 - loss: 0.6821 - val_accuracy: 0.7530 - val_loss: 0.7724 Epoch 20/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7628 - loss: 0.6823 - val_accuracy: 0.7514 - val_loss: 0.7643 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7505 - loss: 0.7771 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.80 0.79 0.79 1000 1 0.91 0.86 0.88 1000 2 0.73 0.59 0.65 1000 3 0.66 0.50 0.57 1000 4 0.58 0.85 0.69 1000 5 0.72 0.62 0.67 1000 6 0.64 0.92 0.75 1000 7 0.91 0.67 0.77 1000 8 0.80 0.90 0.85 1000 9 0.90 0.77 0.83 1000 accuracy 0.75 10000 macro avg 0.76 0.75 0.75 10000 weighted avg 0.76 0.75 0.75 10000 Accuracy Score: 0.7471 Root Mean Square Error: 1.9793433254491248
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 16s 14ms/step - accuracy: 0.3113 - loss: 1.8545 - val_accuracy: 0.5356 - val_loss: 1.4574 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 6ms/step - accuracy: 0.5311 - loss: 1.3102 - val_accuracy: 0.5748 - val_loss: 1.3113 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.5953 - loss: 1.1580 - val_accuracy: 0.6484 - val_loss: 1.1747 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6316 - loss: 1.0589 - val_accuracy: 0.6622 - val_loss: 1.1019 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.6583 - loss: 0.9758 - val_accuracy: 0.6848 - val_loss: 1.0346 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6755 - loss: 0.9318 - val_accuracy: 0.6936 - val_loss: 1.0031 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6844 - loss: 0.9009 - val_accuracy: 0.7026 - val_loss: 0.9818 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7005 - loss: 0.8692 - val_accuracy: 0.6882 - val_loss: 0.9608 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7089 - loss: 0.8375 - val_accuracy: 0.7214 - val_loss: 0.8772 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7157 - loss: 0.8166 - val_accuracy: 0.7354 - val_loss: 0.8346 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7189 - loss: 0.8001 - val_accuracy: 0.7270 - val_loss: 0.8548 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7283 - loss: 0.7742 - val_accuracy: 0.7270 - val_loss: 0.8576 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7313 - loss: 0.7659 - val_accuracy: 0.7216 - val_loss: 0.8352 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7396 - loss: 0.7419 - val_accuracy: 0.7438 - val_loss: 0.7804 Epoch 15/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7431 - loss: 0.7303 - val_accuracy: 0.7140 - val_loss: 0.8501 Epoch 16/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7466 - loss: 0.7182 - val_accuracy: 0.7546 - val_loss: 0.7979 Epoch 17/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7514 - loss: 0.7091 - val_accuracy: 0.7376 - val_loss: 0.7990 Epoch 18/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.7513 - loss: 0.7143 - val_accuracy: 0.7262 - val_loss: 0.7975 Epoch 19/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7544 - loss: 0.7123 - val_accuracy: 0.7552 - val_loss: 0.7748 Epoch 20/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7574 - loss: 0.6942 - val_accuracy: 0.6832 - val_loss: 0.9027 Epoch 21/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7610 - loss: 0.6847 - val_accuracy: 0.7426 - val_loss: 0.7714 Epoch 22/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7678 - loss: 0.6683 - val_accuracy: 0.7636 - val_loss: 0.7585 Epoch 23/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7639 - loss: 0.6716 - val_accuracy: 0.7460 - val_loss: 0.7521 Epoch 24/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7670 - loss: 0.6637 - val_accuracy: 0.7650 - val_loss: 0.7274 Epoch 25/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7662 - loss: 0.6586 - val_accuracy: 0.7758 - val_loss: 0.7080 Epoch 26/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7736 - loss: 0.6459 - val_accuracy: 0.7464 - val_loss: 0.7421 Epoch 27/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7692 - loss: 0.6516 - val_accuracy: 0.7294 - val_loss: 0.7768 Epoch 28/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7797 - loss: 0.6280 - val_accuracy: 0.7636 - val_loss: 0.7143 Epoch 29/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7805 - loss: 0.6271 - val_accuracy: 0.7666 - val_loss: 0.7057 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7656 - loss: 0.7070 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.83 0.79 0.81 1000 1 0.93 0.84 0.88 1000 2 0.75 0.60 0.67 1000 3 0.64 0.54 0.59 1000 4 0.63 0.85 0.72 1000 5 0.67 0.72 0.69 1000 6 0.73 0.89 0.80 1000 7 0.91 0.73 0.81 1000 8 0.79 0.91 0.85 1000 9 0.90 0.81 0.85 1000 accuracy 0.77 10000 macro avg 0.78 0.77 0.77 10000 weighted avg 0.78 0.77 0.77 10000 Accuracy Score: 0.7682 Root Mean Square Error: 1.8783769589728256
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 17s 16ms/step - accuracy: 0.3147 - loss: 1.8437 - val_accuracy: 0.5312 - val_loss: 1.4586 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.5431 - loss: 1.2845 - val_accuracy: 0.6068 - val_loss: 1.2894 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6077 - loss: 1.1150 - val_accuracy: 0.6614 - val_loss: 1.1440 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6348 - loss: 1.0398 - val_accuracy: 0.6920 - val_loss: 1.0418 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6633 - loss: 0.9673 - val_accuracy: 0.6538 - val_loss: 1.0480 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6828 - loss: 0.9099 - val_accuracy: 0.6926 - val_loss: 0.9787 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6949 - loss: 0.8675 - val_accuracy: 0.6980 - val_loss: 0.9491 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7100 - loss: 0.8354 - val_accuracy: 0.6542 - val_loss: 1.0866 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7138 - loss: 0.8204 - val_accuracy: 0.7344 - val_loss: 0.8423 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7235 - loss: 0.7907 - val_accuracy: 0.7320 - val_loss: 0.8580 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7326 - loss: 0.7696 - val_accuracy: 0.6956 - val_loss: 0.8953 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7361 - loss: 0.7561 - val_accuracy: 0.7072 - val_loss: 0.8661 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7368 - loss: 0.7456 - val_accuracy: 0.6956 - val_loss: 0.8905 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.6900 - loss: 0.9114 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.84 0.65 0.73 1000 1 0.96 0.73 0.83 1000 2 0.66 0.55 0.60 1000 3 0.53 0.51 0.52 1000 4 0.60 0.70 0.65 1000 5 0.85 0.39 0.53 1000 6 0.49 0.95 0.65 1000 7 0.90 0.65 0.75 1000 8 0.67 0.92 0.78 1000 9 0.79 0.82 0.81 1000 accuracy 0.69 10000 macro avg 0.73 0.69 0.69 10000 weighted avg 0.73 0.69 0.69 10000 Accuracy Score: 0.6871 Root Mean Square Error: 2.283527972239447
In [ ]:
for i in range(3):
# Build Model 2-4: CNN and MaxPool with dropouts between and after
name = f'CNN_DO_MP_DO_X3_64_128_256-Pat-5-{i}'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1),activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=5)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 15s 14ms/step - accuracy: 0.3266 - loss: 1.8180 - val_accuracy: 0.5104 - val_loss: 1.5165 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.5458 - loss: 1.2759 - val_accuracy: 0.6236 - val_loss: 1.2663 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6090 - loss: 1.1290 - val_accuracy: 0.6486 - val_loss: 1.1534 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6438 - loss: 1.0277 - val_accuracy: 0.6694 - val_loss: 1.0873 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6595 - loss: 0.9691 - val_accuracy: 0.6786 - val_loss: 1.0048 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6822 - loss: 0.9183 - val_accuracy: 0.7124 - val_loss: 0.9573 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6945 - loss: 0.8762 - val_accuracy: 0.6818 - val_loss: 0.9901 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7036 - loss: 0.8516 - val_accuracy: 0.6812 - val_loss: 0.9579 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7165 - loss: 0.8132 - val_accuracy: 0.6930 - val_loss: 0.9148 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7219 - loss: 0.8021 - val_accuracy: 0.7268 - val_loss: 0.8803 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7289 - loss: 0.7822 - val_accuracy: 0.7190 - val_loss: 0.8505 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7336 - loss: 0.7626 - val_accuracy: 0.7630 - val_loss: 0.7760 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7403 - loss: 0.7352 - val_accuracy: 0.7320 - val_loss: 0.8164 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 6ms/step - accuracy: 0.7434 - loss: 0.7337 - val_accuracy: 0.7468 - val_loss: 0.7918 Epoch 15/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7483 - loss: 0.7216 - val_accuracy: 0.7460 - val_loss: 0.7902 Epoch 16/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7508 - loss: 0.7092 - val_accuracy: 0.7310 - val_loss: 0.8168 Epoch 17/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7570 - loss: 0.6955 - val_accuracy: 0.7398 - val_loss: 0.7869 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7224 - loss: 0.8187 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.87 0.62 0.72 1000 1 0.93 0.80 0.86 1000 2 0.60 0.66 0.63 1000 3 0.56 0.58 0.57 1000 4 0.65 0.73 0.69 1000 5 0.68 0.61 0.64 1000 6 0.70 0.89 0.78 1000 7 0.93 0.61 0.74 1000 8 0.65 0.94 0.77 1000 9 0.87 0.78 0.82 1000 accuracy 0.72 10000 macro avg 0.74 0.72 0.72 10000 weighted avg 0.74 0.72 0.72 10000 Accuracy Score: 0.722 Root Mean Square Error: 2.1975213309544914
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 17s 15ms/step - accuracy: 0.3290 - loss: 1.7980 - val_accuracy: 0.5466 - val_loss: 1.4535 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.5548 - loss: 1.2459 - val_accuracy: 0.6164 - val_loss: 1.2509 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6161 - loss: 1.0973 - val_accuracy: 0.6446 - val_loss: 1.1623 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6513 - loss: 1.0017 - val_accuracy: 0.6382 - val_loss: 1.1363 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6686 - loss: 0.9507 - val_accuracy: 0.6708 - val_loss: 1.0397 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6887 - loss: 0.8987 - val_accuracy: 0.6834 - val_loss: 0.9740 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7007 - loss: 0.8582 - val_accuracy: 0.7094 - val_loss: 0.9391 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.7114 - loss: 0.8327 - val_accuracy: 0.7156 - val_loss: 0.9162 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7196 - loss: 0.7992 - val_accuracy: 0.7146 - val_loss: 0.8632 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7245 - loss: 0.7878 - val_accuracy: 0.7350 - val_loss: 0.8272 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7348 - loss: 0.7614 - val_accuracy: 0.7478 - val_loss: 0.7983 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7385 - loss: 0.7478 - val_accuracy: 0.7290 - val_loss: 0.8185 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 8ms/step - accuracy: 0.7429 - loss: 0.7326 - val_accuracy: 0.7398 - val_loss: 0.7977 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 6ms/step - accuracy: 0.7507 - loss: 0.7111 - val_accuracy: 0.7356 - val_loss: 0.8049 Epoch 15/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7509 - loss: 0.7095 - val_accuracy: 0.7476 - val_loss: 0.7786 Epoch 16/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7587 - loss: 0.6885 - val_accuracy: 0.7544 - val_loss: 0.7596 Epoch 17/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7577 - loss: 0.6877 - val_accuracy: 0.7418 - val_loss: 0.7825 Epoch 18/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7711 - loss: 0.6641 - val_accuracy: 0.7324 - val_loss: 0.7786 Epoch 19/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7678 - loss: 0.6631 - val_accuracy: 0.7658 - val_loss: 0.7338 Epoch 20/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7713 - loss: 0.6521 - val_accuracy: 0.7502 - val_loss: 0.7357 Epoch 21/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7720 - loss: 0.6469 - val_accuracy: 0.7632 - val_loss: 0.7389 Epoch 22/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7766 - loss: 0.6363 - val_accuracy: 0.7544 - val_loss: 0.7339 Epoch 23/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7757 - loss: 0.6424 - val_accuracy: 0.7518 - val_loss: 0.7479 Epoch 24/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7807 - loss: 0.6360 - val_accuracy: 0.7564 - val_loss: 0.7170 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.7577 - loss: 0.7330 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.85 0.74 0.79 1000 1 0.91 0.87 0.89 1000 2 0.78 0.55 0.65 1000 3 0.73 0.41 0.52 1000 4 0.56 0.88 0.68 1000 5 0.69 0.70 0.70 1000 6 0.70 0.91 0.79 1000 7 0.85 0.77 0.81 1000 8 0.76 0.93 0.84 1000 9 0.89 0.80 0.84 1000 accuracy 0.76 10000 macro avg 0.77 0.76 0.75 10000 weighted avg 0.77 0.76 0.75 10000 Accuracy Score: 0.7555 Root Mean Square Error: 1.9526392395934278
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 16s 14ms/step - accuracy: 0.3306 - loss: 1.8029 - val_accuracy: 0.5406 - val_loss: 1.5078 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 7ms/step - accuracy: 0.5443 - loss: 1.2737 - val_accuracy: 0.5758 - val_loss: 1.3111 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6092 - loss: 1.1121 - val_accuracy: 0.6606 - val_loss: 1.1303 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6365 - loss: 1.0338 - val_accuracy: 0.6762 - val_loss: 1.0696 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6662 - loss: 0.9641 - val_accuracy: 0.6840 - val_loss: 1.0398 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.6763 - loss: 0.9239 - val_accuracy: 0.7022 - val_loss: 0.9490 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6945 - loss: 0.8738 - val_accuracy: 0.7190 - val_loss: 0.9297 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7052 - loss: 0.8390 - val_accuracy: 0.7274 - val_loss: 0.9033 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7167 - loss: 0.8122 - val_accuracy: 0.7170 - val_loss: 0.8696 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7179 - loss: 0.8012 - val_accuracy: 0.7134 - val_loss: 0.8852 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7287 - loss: 0.7757 - val_accuracy: 0.7040 - val_loss: 0.9116 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7370 - loss: 0.7576 - val_accuracy: 0.7232 - val_loss: 0.8135 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7389 - loss: 0.7461 - val_accuracy: 0.7222 - val_loss: 0.8311 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7212 - loss: 0.8508 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.78 0.80 0.79 1000 1 0.86 0.89 0.88 1000 2 0.62 0.63 0.62 1000 3 0.58 0.48 0.53 1000 4 0.69 0.68 0.68 1000 5 0.77 0.47 0.58 1000 6 0.52 0.94 0.67 1000 7 0.85 0.72 0.78 1000 8 0.82 0.87 0.84 1000 9 0.92 0.72 0.81 1000 accuracy 0.72 10000 macro avg 0.74 0.72 0.72 10000 weighted avg 0.74 0.72 0.72 10000 Accuracy Score: 0.7203 Root Mean Square Error: 2.0924148728203975
In [ ]:
data_df = pd.DataFrame(data)
data_df
Out[ ]:
| model | accuracy | val_accuracy | test_accuracy | loss | val_loss | test_loss | time | |
|---|---|---|---|---|---|---|---|---|
| 0 | DNN | 0.565 | 0.562 | 0.567 | 1.245 | 1.274 | 1.268 | 46.604 |
| 1 | CNN_DO_MP_DO_16 | 0.634 | 0.610 | 0.619 | 1.056 | 1.110 | 1.108 | 60.615 |
| 2 | CNN_DO_MP_DO_32 | 0.684 | 0.650 | 0.640 | 0.918 | 1.014 | 1.041 | 85.284 |
| 3 | CNN_DO_MP_DO_64 | 0.726 | 0.662 | 0.656 | 0.792 | 0.968 | 1.000 | 82.357 |
| 4 | CNN_DO_MP_DO_128 | 0.722 | 0.642 | 0.650 | 0.803 | 1.019 | 1.041 | 55.198 |
| 5 | CNN_DO_MP_DO_256 | 0.746 | 0.646 | 0.633 | 0.728 | 1.034 | 1.080 | 106.222 |
| 6 | CNN_DO_MP_DO_512 | 0.764 | 0.643 | 0.635 | 0.677 | 1.065 | 1.105 | 200.123 |
| 7 | CNN_DO_MP_DO_32-2.2 | 0.662 | 0.632 | 0.628 | 0.978 | 1.077 | 1.080 | 100.190 |
| 8 | CNN_DO_MP_DO_32-4.4 | 0.675 | 0.636 | 0.641 | 0.938 | 1.054 | 1.053 | 74.688 |
| 9 | CNN_DO_MP_DO_32-5.5 | 0.674 | 0.639 | 0.641 | 0.943 | 1.043 | 1.058 | 68.455 |
| 10 | CNN_DO_MP_DO_64-2.2 | 0.655 | 0.615 | 0.618 | 1.003 | 1.119 | 1.122 | 63.568 |
| 11 | CNN_DO_MP_DO_64-4.4 | 0.691 | 0.621 | 0.615 | 0.894 | 1.081 | 1.113 | 58.112 |
| 12 | CNN_DO_MP_DO_64-5.5 | 0.676 | 0.633 | 0.635 | 0.938 | 1.050 | 1.062 | 45.898 |
| 13 | CNN_DO_MP_DO_128-2.2 | 0.715 | 0.627 | 0.627 | 0.814 | 1.060 | 1.074 | 79.575 |
| 14 | CNN_DO_MP_DO_128-4.4 | 0.746 | 0.637 | 0.636 | 0.724 | 1.079 | 1.110 | 82.708 |
| 15 | CNN_DO_MP_DO_128-5.5 | 0.721 | 0.632 | 0.642 | 0.807 | 1.068 | 1.075 | 73.661 |
| 16 | CNN_DO_MP_DO_CNN_DO_MP_DO16-2.2 | 0.590 | 0.617 | 0.630 | 1.167 | 1.184 | 1.189 | 75.247 |
| 17 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_64 | 0.703 | 0.688 | 0.690 | 0.858 | 0.939 | 0.940 | 70.261 |
| 18 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_128 | 0.752 | 0.723 | 0.733 | 0.714 | 0.815 | 0.822 | 107.098 |
| 19 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_256 | 0.781 | 0.741 | 0.744 | 0.629 | 0.789 | 0.793 | 111.724 |
| 20 | CNN_DO_MP_DO_CNN_DO_MP_DO_CNN_DO_MP_DO_64_128_256 | 0.732 | 0.726 | 0.730 | 0.772 | 0.813 | 0.822 | 71.486 |
| 21 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_64 | 0.709 | 0.711 | 0.704 | 0.838 | 0.912 | 0.923 | 88.232 |
| 22 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_128 | 0.745 | 0.709 | 0.708 | 0.735 | 0.880 | 0.887 | 76.367 |
| 23 | CNN_DO_MP_DO_CNN_DO_MP_DO_64_256 | 0.788 | 0.740 | 0.742 | 0.609 | 0.777 | 0.787 | 100.630 |
| 24 | CNN_DO_MP_DO_CNN_DO_MP_DO_CNN_DO_MP_DO_64_128_256 | 0.737 | 0.713 | 0.711 | 0.758 | 0.856 | 0.877 | 75.341 |
| 25 | CNN_DO_MP_DO_CNN_DO_MP_DO_CNN_DO_MP_DO_64_128_256 | 0.778 | 0.732 | 0.740 | 0.639 | 0.763 | 0.769 | 136.856 |
| 26 | CNN_DO_MP_DO_X3_128_256_512 | 0.753 | 0.721 | 0.718 | 0.702 | 0.833 | 0.853 | 161.721 |
| 27 | CNN_DO_MP_DO_X3_64_128_256 | 0.765 | 0.761 | 0.752 | 0.666 | 0.719 | 0.748 | 133.832 |
| 28 | CNN_DO_MP_DO_X3_32_64_128 | 0.703 | 0.713 | 0.718 | 0.847 | 0.873 | 0.884 | 102.764 |
| 29 | CNN_DO_MP_DO_X3_64_128_256-1 | 0.735 | 0.718 | 0.716 | 0.759 | 0.837 | 0.857 | 72.193 |
| 30 | CNN_DO_MP_DO_X3_64_128_256-2 | 0.750 | 0.745 | 0.739 | 0.713 | 0.776 | 0.789 | 101.459 |
| 31 | CNN_DO_MP_DO_X3_64_128_256-DNN | 0.716 | 0.703 | 0.703 | 0.811 | 0.850 | 0.866 | 114.463 |
| 32 | CNN_DO_MP_DO_X3_64_128_256-Pat-3-0 | 0.731 | 0.720 | 0.731 | 0.773 | 0.834 | 0.840 | 104.831 |
| 33 | CNN_DO_MP_DO_X3_64_128_256-Pat-3-1 | 0.724 | 0.714 | 0.711 | 0.789 | 0.872 | 0.883 | 68.565 |
| 34 | CNN_DO_MP_DO_X3_64_128_256-Pat-3-2 | 0.761 | 0.759 | 0.749 | 0.683 | 0.766 | 0.788 | 107.973 |
| 35 | CNN_DO_MP_DO_X3_64_128_256-Pat-2-0 | 0.748 | 0.741 | 0.732 | 0.724 | 0.795 | 0.819 | 87.255 |
| 36 | CNN_DO_MP_DO_X3_64_128_256-Pat-2-1 | 0.750 | 0.741 | 0.735 | 0.716 | 0.785 | 0.802 | 91.753 |
| 37 | CNN_DO_MP_DO_X3_64_128_256-Pat-2-2 | 0.719 | 0.703 | 0.702 | 0.806 | 0.899 | 0.914 | 62.646 |
| 38 | CNN_DO_MP_DO_X3_64_128_256-Pat-1-0 | 0.676 | 0.688 | 0.688 | 0.929 | 0.994 | 1.005 | 41.329 |
| 39 | CNN_DO_MP_DO_X3_64_128_256-Pat-1-1 | 0.673 | 0.682 | 0.686 | 0.933 | 0.998 | 1.012 | 42.944 |
| 40 | CNN_DO_MP_DO_X3_64_128_256-Pat-1-2 | 0.674 | 0.698 | 0.698 | 0.938 | 1.010 | 1.021 | 50.324 |
| 41 | CNN_DO_MP_DO_X3_64_128_256-Pat-4-0 | 0.757 | 0.751 | 0.747 | 0.698 | 0.764 | 0.781 | 115.440 |
| 42 | CNN_DO_MP_DO_X3_64_128_256-Pat-4-1 | 0.777 | 0.767 | 0.768 | 0.638 | 0.706 | 0.705 | 162.051 |
| 43 | CNN_DO_MP_DO_X3_64_128_256-Pat-4-2 | 0.737 | 0.696 | 0.687 | 0.748 | 0.890 | 0.915 | 83.437 |
| 44 | CNN_DO_MP_DO_X3_64_128_256-Pat-5-0 | 0.752 | 0.740 | 0.722 | 0.707 | 0.787 | 0.814 | 100.885 |
| 45 | CNN_DO_MP_DO_X3_64_128_256-Pat-5-1 | 0.780 | 0.756 | 0.756 | 0.636 | 0.717 | 0.734 | 142.063 |
| 46 | CNN_DO_MP_DO_X3_64_128_256-Pat-5-2 | 0.739 | 0.722 | 0.720 | 0.743 | 0.831 | 0.848 | 83.833 |
In [ ]:
%%shell
jupyter nbconvert --to html /content/MSDS458_Assignment2_NB1_2.ipynb